diff options
Diffstat (limited to 'fs/fs-writeback.c')
| -rw-r--r-- | fs/fs-writeback.c | 100 |
1 file changed, 50 insertions(+), 50 deletions(-)
diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
index caf049146ca2..c54226be5294 100644
--- a/fs/fs-writeback.c
+++ b/fs/fs-writeback.c
| @@ -278,7 +278,26 @@ int sb_has_dirty_inodes(struct super_block *sb) | |||
| 278 | EXPORT_SYMBOL(sb_has_dirty_inodes); | 278 | EXPORT_SYMBOL(sb_has_dirty_inodes); |
| 279 | 279 | ||
| 280 | /* | 280 | /* |
| 281 | * Write a single inode's dirty pages and inode data out to disk. | 281 | * Wait for writeback on an inode to complete. |
| 282 | */ | ||
| 283 | static void inode_wait_for_writeback(struct inode *inode) | ||
| 284 | { | ||
| 285 | DEFINE_WAIT_BIT(wq, &inode->i_state, __I_SYNC); | ||
| 286 | wait_queue_head_t *wqh; | ||
| 287 | |||
| 288 | wqh = bit_waitqueue(&inode->i_state, __I_SYNC); | ||
| 289 | do { | ||
| 290 | spin_unlock(&inode_lock); | ||
| 291 | __wait_on_bit(wqh, &wq, inode_wait, TASK_UNINTERRUPTIBLE); | ||
| 292 | spin_lock(&inode_lock); | ||
| 293 | } while (inode->i_state & I_SYNC); | ||
| 294 | } | ||
| 295 | |||
| 296 | /* | ||
| 297 | * Write out an inode's dirty pages. Called under inode_lock. Either the | ||
| 298 | * caller has ref on the inode (either via __iget or via syscall against an fd) | ||
| 299 | * or the inode has I_WILL_FREE set (via generic_forget_inode) | ||
| 300 | * | ||
| 282 | * If `wait' is set, wait on the writeout. | 301 | * If `wait' is set, wait on the writeout. |
| 283 | * | 302 | * |
| 284 | * The whole writeout design is quite complex and fragile. We want to avoid | 303 | * The whole writeout design is quite complex and fragile. We want to avoid |
| @@ -288,13 +307,38 @@ EXPORT_SYMBOL(sb_has_dirty_inodes); | |||
| 288 | * Called under inode_lock. | 307 | * Called under inode_lock. |
| 289 | */ | 308 | */ |
| 290 | static int | 309 | static int |
| 291 | __sync_single_inode(struct inode *inode, struct writeback_control *wbc) | 310 | writeback_single_inode(struct inode *inode, struct writeback_control *wbc) |
| 292 | { | 311 | { |
| 293 | unsigned dirty; | ||
| 294 | struct address_space *mapping = inode->i_mapping; | 312 | struct address_space *mapping = inode->i_mapping; |
| 295 | int wait = wbc->sync_mode == WB_SYNC_ALL; | 313 | int wait = wbc->sync_mode == WB_SYNC_ALL; |
| 314 | unsigned dirty; | ||
| 296 | int ret; | 315 | int ret; |
| 297 | 316 | ||
| 317 | if (!atomic_read(&inode->i_count)) | ||
| 318 | WARN_ON(!(inode->i_state & (I_WILL_FREE|I_FREEING))); | ||
| 319 | else | ||
| 320 | WARN_ON(inode->i_state & I_WILL_FREE); | ||
| 321 | |||
| 322 | if (inode->i_state & I_SYNC) { | ||
| 323 | /* | ||
| 324 | * If this inode is locked for writeback and we are not doing | ||
| 325 | * writeback-for-data-integrity, move it to s_more_io so that | ||
| 326 | * writeback can proceed with the other inodes on s_io. | ||
| 327 | * | ||
| 328 | * We'll have another go at writing back this inode when we | ||
| 329 | * completed a full scan of s_io. | ||
| 330 | */ | ||
| 331 | if (!wait) { | ||
| 332 | requeue_io(inode); | ||
| 333 | return 0; | ||
| 334 | } | ||
| 335 | |||
| 336 | /* | ||
| 337 | * It's a data-integrity sync. We must wait. | ||
| 338 | */ | ||
| 339 | inode_wait_for_writeback(inode); | ||
| 340 | } | ||
| 341 | |||
| 298 | BUG_ON(inode->i_state & I_SYNC); | 342 | BUG_ON(inode->i_state & I_SYNC); |
| 299 | 343 | ||
| 300 | /* Set I_SYNC, reset I_DIRTY */ | 344 | /* Set I_SYNC, reset I_DIRTY */ |
| @@ -390,50 +434,6 @@ __sync_single_inode(struct inode *inode, struct writeback_control *wbc) | |||
| 390 | } | 434 | } |
| 391 | 435 | ||
| 392 | /* | 436 | /* |
| 393 | * Write out an inode's dirty pages. Called under inode_lock. Either the | ||
| 394 | * caller has ref on the inode (either via __iget or via syscall against an fd) | ||
| 395 | * or the inode has I_WILL_FREE set (via generic_forget_inode) | ||
| 396 | */ | ||
| 397 | static int | ||
| 398 | __writeback_single_inode(struct inode *inode, struct writeback_control *wbc) | ||
| 399 | { | ||
| 400 | wait_queue_head_t *wqh; | ||
| 401 | |||
| 402 | if (!atomic_read(&inode->i_count)) | ||
| 403 | WARN_ON(!(inode->i_state & (I_WILL_FREE|I_FREEING))); | ||
| 404 | else | ||
| 405 | WARN_ON(inode->i_state & I_WILL_FREE); | ||
| 406 | |||
| 407 | if ((wbc->sync_mode != WB_SYNC_ALL) && (inode->i_state & I_SYNC)) { | ||
| 408 | /* | ||
| 409 | * We're skipping this inode because it's locked, and we're not | ||
| 410 | * doing writeback-for-data-integrity. Move it to s_more_io so | ||
| 411 | * that writeback can proceed with the other inodes on s_io. | ||
| 412 | * We'll have another go at writing back this inode when we | ||
| 413 | * completed a full scan of s_io. | ||
| 414 | */ | ||
| 415 | requeue_io(inode); | ||
| 416 | return 0; | ||
| 417 | } | ||
| 418 | |||
| 419 | /* | ||
| 420 | * It's a data-integrity sync. We must wait. | ||
| 421 | */ | ||
| 422 | if (inode->i_state & I_SYNC) { | ||
| 423 | DEFINE_WAIT_BIT(wq, &inode->i_state, __I_SYNC); | ||
| 424 | |||
| 425 | wqh = bit_waitqueue(&inode->i_state, __I_SYNC); | ||
| 426 | do { | ||
| 427 | spin_unlock(&inode_lock); | ||
| 428 | __wait_on_bit(wqh, &wq, inode_wait, | ||
| 429 | TASK_UNINTERRUPTIBLE); | ||
| 430 | spin_lock(&inode_lock); | ||
| 431 | } while (inode->i_state & I_SYNC); | ||
| 432 | } | ||
| 433 | return __sync_single_inode(inode, wbc); | ||
| 434 | } | ||
| 435 | |||
| 436 | /* | ||
| 437 | * Write out a superblock's list of dirty inodes. A wait will be performed | 437 | * Write out a superblock's list of dirty inodes. A wait will be performed |
| 438 | * upon no inodes, all inodes or the final one, depending upon sync_mode. | 438 | * upon no inodes, all inodes or the final one, depending upon sync_mode. |
| 439 | * | 439 | * |
| @@ -526,7 +526,7 @@ void generic_sync_sb_inodes(struct super_block *sb, | |||
| 526 | BUG_ON(inode->i_state & (I_FREEING | I_CLEAR)); | 526 | BUG_ON(inode->i_state & (I_FREEING | I_CLEAR)); |
| 527 | __iget(inode); | 527 | __iget(inode); |
| 528 | pages_skipped = wbc->pages_skipped; | 528 | pages_skipped = wbc->pages_skipped; |
| 529 | __writeback_single_inode(inode, wbc); | 529 | writeback_single_inode(inode, wbc); |
| 530 | if (current_is_pdflush()) | 530 | if (current_is_pdflush()) |
| 531 | writeback_release(bdi); | 531 | writeback_release(bdi); |
| 532 | if (wbc->pages_skipped != pages_skipped) { | 532 | if (wbc->pages_skipped != pages_skipped) { |
| @@ -708,7 +708,7 @@ int write_inode_now(struct inode *inode, int sync) | |||
| 708 | 708 | ||
| 709 | might_sleep(); | 709 | might_sleep(); |
| 710 | spin_lock(&inode_lock); | 710 | spin_lock(&inode_lock); |
| 711 | ret = __writeback_single_inode(inode, &wbc); | 711 | ret = writeback_single_inode(inode, &wbc); |
| 712 | spin_unlock(&inode_lock); | 712 | spin_unlock(&inode_lock); |
| 713 | if (sync) | 713 | if (sync) |
| 714 | inode_sync_wait(inode); | 714 | inode_sync_wait(inode); |
| @@ -732,7 +732,7 @@ int sync_inode(struct inode *inode, struct writeback_control *wbc) | |||
| 732 | int ret; | 732 | int ret; |
| 733 | 733 | ||
| 734 | spin_lock(&inode_lock); | 734 | spin_lock(&inode_lock); |
| 735 | ret = __writeback_single_inode(inode, wbc); | 735 | ret = writeback_single_inode(inode, wbc); |
| 736 | spin_unlock(&inode_lock); | 736 | spin_unlock(&inode_lock); |
| 737 | return ret; | 737 | return ret; |
| 738 | } | 738 | } |
