path: root/fs/fs-writeback.c
author	Jan Kara <jack@suse.cz>	2012-05-03 08:47:58 -0400
committer	Fengguang Wu <fengguang.wu@intel.com>	2012-05-06 01:43:39 -0400
commit	ccb26b5a65867839d95156e02ea4861f64a8cbf3 (patch)
tree	36ee626d4525171d6a8bb2e4582bd9e0a811bb2d /fs/fs-writeback.c
parent	6290be1c1dc6589eeda213aa40946b27fa4faac8 (diff)
writeback: Separate inode requeueing after writeback
Move inode requeueing after inode has been written out into a separate
function.

Reviewed-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Jan Kara <jack@suse.cz>
Signed-off-by: Fengguang Wu <fengguang.wu@intel.com>
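For context, the net effect on the caller is that a large inline block in
writeback_single_inode() collapses into a single requeue_inode() call. Below
is a minimal userspace sketch of the list-placement decision the new helper
centralizes; every name here (fake_inode, requeue_decision, the list_choice
values) is a stand-in invented for illustration, not a kernel API.

/*
 * Userspace-only sketch of the requeue decision in this patch;
 * all names are stand-ins for illustration, not kernel APIs.
 */
#include <stdio.h>
#include <stdbool.h>

enum list_choice { LIST_NONE, LIST_MORE_IO, LIST_DIRTY_TAIL };

struct fake_inode {
	bool freeing;		/* stands in for I_FREEING           */
	bool dirty;		/* stands in for I_DIRTY             */
	bool pages_dirty;	/* stands in for PAGECACHE_TAG_DIRTY */
};

/* Mirrors the branch structure of requeue_inode() in the patch below. */
static enum list_choice requeue_decision(const struct fake_inode *inode,
					 long nr_to_write)
{
	if (inode->freeing)
		return LIST_NONE;		/* inode is going away        */
	if (inode->pages_dirty) {
		if (nr_to_write <= 0)
			return LIST_MORE_IO;	/* slice used up: requeue_io  */
		return LIST_DIRTY_TAIL;		/* blocked: redirty_tail      */
	}
	if (inode->dirty)
		return LIST_DIRTY_TAIL;		/* redirtied during writeback */
	return LIST_NONE;			/* clean: drop from lists     */
}

int main(void)
{
	struct fake_inode ino = { .dirty = true, .pages_dirty = true };

	/* Slice exhausted while pages remain dirty: expect LIST_MORE_IO (1). */
	printf("decision: %d\n", requeue_decision(&ino, 0));
	return 0;
}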
Diffstat (limited to 'fs/fs-writeback.c')
-rw-r--r--	fs/fs-writeback.c	| 102
1 file changed, 55 insertions(+), 47 deletions(-)
diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
index 3804a10f2be7..5d3de002cb8e 100644
--- a/fs/fs-writeback.c
+++ b/fs/fs-writeback.c
@@ -345,6 +345,60 @@ static void inode_wait_for_writeback(struct inode *inode,
 }
 
 /*
+ * Find proper writeback list for the inode depending on its current state and
+ * possibly also change of its state while we were doing writeback. Here we
+ * handle things such as livelock prevention or fairness of writeback among
+ * inodes. This function can be called only by flusher thread - noone else
+ * processes all inodes in writeback lists and requeueing inodes behind flusher
+ * thread's back can have unexpected consequences.
+ */
+static void requeue_inode(struct inode *inode, struct bdi_writeback *wb,
+			  struct writeback_control *wbc)
+{
+	if (inode->i_state & I_FREEING)
+		return;
+
+	/*
+	 * Sync livelock prevention. Each inode is tagged and synced in one
+	 * shot. If still dirty, it will be redirty_tail()'ed below. Update
+	 * the dirty time to prevent enqueue and sync it again.
+	 */
+	if ((inode->i_state & I_DIRTY) &&
+	    (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages))
+		inode->dirtied_when = jiffies;
+
+	if (mapping_tagged(inode->i_mapping, PAGECACHE_TAG_DIRTY)) {
+		/*
+		 * We didn't write back all the pages. nfs_writepages()
+		 * sometimes bales out without doing anything.
+		 */
+		if (wbc->nr_to_write <= 0) {
+			/* Slice used up. Queue for next turn. */
+			requeue_io(inode, wb);
+		} else {
+			/*
+			 * Writeback blocked by something other than
+			 * congestion. Delay the inode for some time to
+			 * avoid spinning on the CPU (100% iowait)
+			 * retrying writeback of the dirty page/inode
+			 * that cannot be performed immediately.
+			 */
+			redirty_tail(inode, wb);
+		}
+	} else if (inode->i_state & I_DIRTY) {
+		/*
+		 * Filesystems can dirty the inode during writeback operations,
+		 * such as delayed allocation during submission or metadata
+		 * updates after data IO completion.
+		 */
+		redirty_tail(inode, wb);
+	} else {
+		/* The inode is clean. Remove from writeback lists. */
+		list_del_init(&inode->i_wb_list);
+	}
+}
+
+/*
  * Write out an inode's dirty pages. Called under wb->list_lock and
  * inode->i_lock. Either the caller has an active reference on the inode or
  * the inode has I_WILL_FREE set.
@@ -422,53 +476,7 @@ writeback_single_inode(struct inode *inode, struct bdi_writeback *wb,
 
 	spin_lock(&wb->list_lock);
 	spin_lock(&inode->i_lock);
-	if (!(inode->i_state & I_FREEING)) {
-		/*
-		 * Sync livelock prevention. Each inode is tagged and synced in
-		 * one shot. If still dirty, it will be redirty_tail()'ed below.
-		 * Update the dirty time to prevent enqueue and sync it again.
-		 */
-		if ((inode->i_state & I_DIRTY) &&
-		    (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages))
-			inode->dirtied_when = jiffies;
-
-		if (mapping_tagged(mapping, PAGECACHE_TAG_DIRTY)) {
-			/*
-			 * We didn't write back all the pages. nfs_writepages()
-			 * sometimes bales out without doing anything.
-			 */
-			if (wbc->nr_to_write <= 0) {
-				/*
-				 * slice used up: queue for next turn
-				 */
-				requeue_io(inode, wb);
-			} else {
-				/*
-				 * Writeback blocked by something other than
-				 * congestion. Delay the inode for some time to
-				 * avoid spinning on the CPU (100% iowait)
-				 * retrying writeback of the dirty page/inode
-				 * that cannot be performed immediately.
-				 */
-				redirty_tail(inode, wb);
-			}
-		} else if (inode->i_state & I_DIRTY) {
-			/*
-			 * Filesystems can dirty the inode during writeback
-			 * operations, such as delayed allocation during
-			 * submission or metadata updates after data IO
-			 * completion.
-			 */
-			redirty_tail(inode, wb);
-		} else {
-			/*
-			 * The inode is clean. At this point we either have
-			 * a reference to the inode or it's on it's way out.
-			 * No need to add it back to the LRU.
-			 */
-			list_del_init(&inode->i_wb_list);
-		}
-	}
+	requeue_inode(inode, wb, wbc);
 	inode_sync_complete(inode);
 	trace_writeback_single_inode(inode, wbc, nr_to_write);
 	return ret;
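
A note on the sync-livelock comment this move preserves: under WB_SYNC_ALL or
tagged_writepages, refreshing inode->dirtied_when makes a still-dirty inode
look newly dirtied, so the same sync pass (which only collects inodes dirtied
before it started) will not pick it up again. Below is a hedged, userspace-only
illustration of that staleness test, with jiffies reduced to a plain counter
and all names invented for the example; it is not the kernel implementation.

#include <stdio.h>
#include <stdbool.h>

/*
 * Stand-in for the expiry check the flusher applies when collecting
 * inodes for a sync pass; names invented for illustration.
 */
static bool picked_up_by_sync(unsigned long dirtied_when,
			      unsigned long sync_started)
{
	/* Only inodes dirtied before the pass began are collected, which
	 * is why requeue_inode() refreshes dirtied_when on still-dirty
	 * inodes: they are deferred to a later pass instead of looping. */
	return dirtied_when <= sync_started;
}

int main(void)
{
	unsigned long sync_started = 1000;

	printf("%d\n", picked_up_by_sync(900, sync_started));	/* 1: collected */
	printf("%d\n", picked_up_by_sync(1100, sync_started));	/* 0: deferred  */
	return 0;
}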