 fs/fs-writeback.c | 49 +++++++++++++++++++++++++++----------------------
 1 file changed, 27 insertions(+), 22 deletions(-)
diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
index f8cd7a97f5b7..59b3ee63b624 100644
--- a/fs/fs-writeback.c
+++ b/fs/fs-writeback.c
@@ -149,21 +149,19 @@ static void wb_clear_pending(struct bdi_writeback *wb, struct bdi_work *work)
 
 static void bdi_queue_work(struct backing_dev_info *bdi, struct bdi_work *work)
 {
-	if (work) {
-		work->seen = bdi->wb_mask;
-		BUG_ON(!work->seen);
-		atomic_set(&work->pending, bdi->wb_cnt);
-		BUG_ON(!bdi->wb_cnt);
+	work->seen = bdi->wb_mask;
+	BUG_ON(!work->seen);
+	atomic_set(&work->pending, bdi->wb_cnt);
+	BUG_ON(!bdi->wb_cnt);
 
-		/*
-		 * Make sure stores are seen before it appears on the list
-		 */
-		smp_mb();
+	/*
+	 * Make sure stores are seen before it appears on the list
+	 */
+	smp_mb();
 
-		spin_lock(&bdi->wb_lock);
-		list_add_tail_rcu(&work->list, &bdi->work_list);
-		spin_unlock(&bdi->wb_lock);
-	}
+	spin_lock(&bdi->wb_lock);
+	list_add_tail_rcu(&work->list, &bdi->work_list);
+	spin_unlock(&bdi->wb_lock);
 
 	/*
 	 * If the default thread isn't there, make sure we add it. When
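Note: bdi_queue_work() previously tolerated a NULL work item (the old allocation-failure path); this hunk drops that guard and makes the function assume a valid item, moving failure handling to the caller (last hunk below). The smp_mb() orders the seen/pending stores before list_add_tail_rcu() publishes the item, so lock-free readers of work_list never observe a half-initialized entry. A minimal userspace C11 sketch of that publish ordering, as an analogy only (queue_work/peek_work are hypothetical names, and a release CAS stands in for smp_mb() + list_add_tail_rcu()):

    #include <stdatomic.h>
    #include <stddef.h>

    struct work {
        unsigned long seen;     /* must be initialized before the node is visible */
        atomic_int    pending;
        struct work  *next;
    };

    static struct work *_Atomic work_list = NULL;

    /* Publisher: fill in the fields, then make the node reachable. */
    static void queue_work(struct work *w, unsigned long mask, int cnt)
    {
        w->seen = mask;
        atomic_init(&w->pending, cnt);

        struct work *head = atomic_load_explicit(&work_list, memory_order_relaxed);
        do {
            w->next = head;
            /* release ordering plays the role of the smp_mb() above */
        } while (!atomic_compare_exchange_weak_explicit(&work_list, &head, w,
                                                        memory_order_release,
                                                        memory_order_relaxed));
    }

    /* Consumer: the acquire load pairs with the release publish above. */
    static struct work *peek_work(void)
    {
        return atomic_load_explicit(&work_list, memory_order_acquire);
    }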
@@ -175,14 +173,12 @@ static void bdi_queue_work(struct backing_dev_info *bdi, struct bdi_work *work)
 		struct bdi_writeback *wb = &bdi->wb;
 
 		/*
-		 * If we failed allocating the bdi work item, wake up the wb
-		 * thread always. As a safety precaution, it'll flush out
-		 * everything
+		 * End work now if this wb has no dirty IO pending. Otherwise
+		 * wakeup the handling thread
 		 */
-		if (!wb_has_dirty_io(wb)) {
-			if (work)
-				wb_clear_pending(wb, work);
-		} else if (wb->task)
+		if (!wb_has_dirty_io(wb))
+			wb_clear_pending(wb, work);
+		else if (wb->task)
 			wake_up_process(wb->task);
 	}
 }
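Note: with work now guaranteed non-NULL, the no-dirty-IO branch can call wb_clear_pending() unconditionally. pending was set to bdi->wb_cnt in the first hunk, i.e. roughly one reference per writeback thread, so a wb with nothing to flush drops its share immediately and the work item can complete without waiting on it. A minimal sketch of that dec-and-test lifetime pattern (userspace analogy; work_put() is a hypothetical name standing in for wb_clear_pending()):

    #include <stdatomic.h>
    #include <stdlib.h>

    struct work {
        atomic_int pending;     /* one reference per writeback thread */
        /* ... work description ... */
    };

    /* Drop this thread's reference; the last one to do so frees the item. */
    static void work_put(struct work *w)
    {
        if (atomic_fetch_sub_explicit(&w->pending, 1, memory_order_acq_rel) == 1)
            free(w);            /* stand-in for the real completion path */
    }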
@@ -202,11 +198,20 @@ static void bdi_alloc_queue_work(struct backing_dev_info *bdi,
 {
 	struct bdi_work *work;
 
+	/*
+	 * This is WB_SYNC_NONE writeback, so if allocation fails just
+	 * wakeup the thread for old dirty data writeback
+	 */
 	work = kmalloc(sizeof(*work), GFP_ATOMIC);
-	if (work)
+	if (work) {
 		bdi_work_init(work, wbc);
+		bdi_queue_work(bdi, work);
+	} else {
+		struct bdi_writeback *wb = &bdi->wb;
 
-	bdi_queue_work(bdi, work);
+		if (wb->task)
+			wake_up_process(wb->task);
+	}
 }
 
 void bdi_start_writeback(struct writeback_control *wbc)
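Note: the allocation-failure handling that used to hide inside bdi_queue_work() now sits next to the kmalloc() that can fail. Because this path only issues WB_SYNC_NONE (best-effort) writeback, a failed GFP_ATOMIC allocation can safely degrade to waking the flusher thread, which will still write back old dirty data on its own. The shape of that fallback as a hedged sketch (queue_work()/wake_flusher() are hypothetical stand-ins for bdi_queue_work()/wake_up_process()):

    #include <stdlib.h>

    struct work { int dummy; /* ... work description ... */ };

    static void queue_work(struct work *w) { (void)w; /* hand off to the flusher */ }
    static void wake_flusher(void)         { /* kick the thread directly */ }

    static void alloc_queue_work(void)
    {
        /* kmalloc(..., GFP_ATOMIC) analogue: may fail, must not sleep */
        struct work *w = malloc(sizeof(*w));

        if (w)
            queue_work(w);      /* normal path: queue a described work item */
        else
            wake_flusher();     /* degraded path: bare wakeup, no descriptor */
    }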