path: root/fs/fs-writeback.c
author    Jens Axboe <jens.axboe@oracle.com>  2009-09-13 14:07:36 -0400
committer Jens Axboe <jens.axboe@oracle.com>  2009-09-16 09:18:52 -0400
commit    bcddc3f01c9122882c8b9f12ab94a934e55aef97 (patch)
tree      db642f05ec083f49c4e32cf8d93c9f14028d8ec5 /fs/fs-writeback.c
parent    cfc4ba5365449cb6b5c9f68d755a142f17da1e47 (diff)
writeback: inline allocation failure handling in bdi_alloc_queue_work()
This gets rid of work == NULL in bdi_queue_work() and puts the OOM
handling where it belongs.

Acked-by: Jan Kara <jack@suse.cz>
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
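The pattern the patch applies, handling an allocation failure inline at the allocating call site instead of threading a NULL pointer down into the queueing function, can be sketched in plain userspace C. All names below (struct work_item, queue_work_item, wake_flusher_fallback, alloc_and_queue) are hypothetical stand-ins for illustration, not the kernel API:

#include <stdio.h>
#include <stdlib.h>

/* Hypothetical stand-in for the work item being queued. */
struct work_item {
	int payload;
};

/* After the refactor, the queueing path can assume a valid item;
 * the NULL check that used to live here is gone. */
static void queue_work_item(struct work_item *work)
{
	printf("queued work, payload=%d\n", work->payload);
	free(work);	/* consume and free the item */
}

/* Stand-in for waking the flusher thread directly on OOM. */
static void wake_flusher_fallback(void)
{
	printf("allocation failed, kicking flusher thread directly\n");
}

static void alloc_and_queue(int payload)
{
	struct work_item *work = malloc(sizeof(*work));

	if (work) {
		work->payload = payload;
		queue_work_item(work);
	} else {
		/* OOM handled inline, at the allocation site. */
		wake_flusher_fallback();
	}
}

int main(void)
{
	alloc_and_queue(42);
	return 0;
}

The benefit is the one the commit message names: the consumer no longer has to distinguish a real item from a NULL placeholder, so its invariants get simpler.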
Diffstat (limited to 'fs/fs-writeback.c')
 fs/fs-writeback.c | 49 +++++++++++++++++++++++++++----------------------
 1 file changed, 27 insertions(+), 22 deletions(-)
diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
index f8cd7a97f5b7..59b3ee63b624 100644
--- a/fs/fs-writeback.c
+++ b/fs/fs-writeback.c
@@ -149,21 +149,19 @@ static void wb_clear_pending(struct bdi_writeback *wb, struct bdi_work *work)
 
 static void bdi_queue_work(struct backing_dev_info *bdi, struct bdi_work *work)
 {
-	if (work) {
-		work->seen = bdi->wb_mask;
-		BUG_ON(!work->seen);
-		atomic_set(&work->pending, bdi->wb_cnt);
-		BUG_ON(!bdi->wb_cnt);
+	work->seen = bdi->wb_mask;
+	BUG_ON(!work->seen);
+	atomic_set(&work->pending, bdi->wb_cnt);
+	BUG_ON(!bdi->wb_cnt);
 
-		/*
-		 * Make sure stores are seen before it appears on the list
-		 */
-		smp_mb();
+	/*
+	 * Make sure stores are seen before it appears on the list
+	 */
+	smp_mb();
 
-		spin_lock(&bdi->wb_lock);
-		list_add_tail_rcu(&work->list, &bdi->work_list);
-		spin_unlock(&bdi->wb_lock);
-	}
+	spin_lock(&bdi->wb_lock);
+	list_add_tail_rcu(&work->list, &bdi->work_list);
+	spin_unlock(&bdi->wb_lock);
 
 	/*
 	 * If the default thread isn't there, make sure we add it. When
@@ -175,14 +173,12 @@ static void bdi_queue_work(struct backing_dev_info *bdi, struct bdi_work *work)
 		struct bdi_writeback *wb = &bdi->wb;
 
 		/*
-		 * If we failed allocating the bdi work item, wake up the wb
-		 * thread always. As a safety precaution, it'll flush out
-		 * everything
+		 * End work now if this wb has no dirty IO pending. Otherwise
+		 * wakeup the handling thread
 		 */
-		if (!wb_has_dirty_io(wb)) {
-			if (work)
-				wb_clear_pending(wb, work);
-		} else if (wb->task)
+		if (!wb_has_dirty_io(wb))
+			wb_clear_pending(wb, work);
+		else if (wb->task)
 			wake_up_process(wb->task);
 	}
 }
@@ -202,11 +198,20 @@ static void bdi_alloc_queue_work(struct backing_dev_info *bdi,
 {
 	struct bdi_work *work;
 
+	/*
+	 * This is WB_SYNC_NONE writeback, so if allocation fails just
+	 * wakeup the thread for old dirty data writeback
+	 */
 	work = kmalloc(sizeof(*work), GFP_ATOMIC);
-	if (work)
+	if (work) {
 		bdi_work_init(work, wbc);
+		bdi_queue_work(bdi, work);
+	} else {
+		struct bdi_writeback *wb = &bdi->wb;
 
-	bdi_queue_work(bdi, work);
+		if (wb->task)
+			wake_up_process(wb->task);
+	}
 }
 
 void bdi_start_writeback(struct writeback_control *wbc)
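For reference, the bdi_alloc_queue_work() that results from applying the last hunk reads as follows. This is reconstructed from the diff above, not taken from a tree checkout, so the wbc parameter name is inferred from the bdi_work_init(work, wbc) call:

static void bdi_alloc_queue_work(struct backing_dev_info *bdi,
				 struct writeback_control *wbc)
{
	struct bdi_work *work;

	/*
	 * This is WB_SYNC_NONE writeback, so if allocation fails just
	 * wakeup the thread for old dirty data writeback
	 */
	work = kmalloc(sizeof(*work), GFP_ATOMIC);
	if (work) {
		bdi_work_init(work, wbc);
		bdi_queue_work(bdi, work);
	} else {
		struct bdi_writeback *wb = &bdi->wb;

		if (wb->task)
			wake_up_process(wb->task);
	}
}

Because this path only serves WB_SYNC_NONE (best-effort) writeback, dropping the work item on allocation failure is safe: waking the flusher thread directly still gets old dirty data written back, just without a dedicated work item describing the request.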