diff options
author | Ingo Molnar <mingo@elte.hu> | 2009-05-07 05:17:13 -0400 |
---|---|---|
committer | Ingo Molnar <mingo@elte.hu> | 2009-05-07 05:17:34 -0400 |
commit | 44347d947f628060b92449702071bfe1d31dfb75 (patch) | |
tree | c6ed74610d5b3295df4296659f80f5feb94b28cc /fs/buffer.c | |
parent | d94fc523f3c35bd8013f04827e94756cbc0212f4 (diff) | |
parent | 413f81eba35d6ede9289b0c8a920c013a84fac71 (diff) |
Merge branch 'linus' into tracing/core
Merge reason: tracing/core was on a .30-rc1 base and was missing out
on a handful of tracing fixes present in .30-rc5-almost.
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'fs/buffer.c')
-rw-r--r-- | fs/buffer.c | 66 |
1 files changed, 49 insertions, 17 deletions
diff --git a/fs/buffer.c b/fs/buffer.c index 13edf7ad3ff1..aed297739eb0 100644 --- a/fs/buffer.c +++ b/fs/buffer.c | |||
@@ -360,7 +360,7 @@ still_busy: | |||
360 | * Completion handler for block_write_full_page() - pages which are unlocked | 360 | * Completion handler for block_write_full_page() - pages which are unlocked |
361 | * during I/O, and which have PageWriteback cleared upon I/O completion. | 361 | * during I/O, and which have PageWriteback cleared upon I/O completion. |
362 | */ | 362 | */ |
363 | static void end_buffer_async_write(struct buffer_head *bh, int uptodate) | 363 | void end_buffer_async_write(struct buffer_head *bh, int uptodate) |
364 | { | 364 | { |
365 | char b[BDEVNAME_SIZE]; | 365 | char b[BDEVNAME_SIZE]; |
366 | unsigned long flags; | 366 | unsigned long flags; |
@@ -438,11 +438,17 @@ static void mark_buffer_async_read(struct buffer_head *bh) | |||
438 | set_buffer_async_read(bh); | 438 | set_buffer_async_read(bh); |
439 | } | 439 | } |
440 | 440 | ||
441 | void mark_buffer_async_write(struct buffer_head *bh) | 441 | void mark_buffer_async_write_endio(struct buffer_head *bh, |
442 | bh_end_io_t *handler) | ||
442 | { | 443 | { |
443 | bh->b_end_io = end_buffer_async_write; | 444 | bh->b_end_io = handler; |
444 | set_buffer_async_write(bh); | 445 | set_buffer_async_write(bh); |
445 | } | 446 | } |
447 | |||
448 | void mark_buffer_async_write(struct buffer_head *bh) | ||
449 | { | ||
450 | mark_buffer_async_write_endio(bh, end_buffer_async_write); | ||
451 | } | ||
446 | EXPORT_SYMBOL(mark_buffer_async_write); | 452 | EXPORT_SYMBOL(mark_buffer_async_write); |
447 | 453 | ||
448 | 454 | ||
@@ -547,7 +553,7 @@ repeat: | |||
547 | return err; | 553 | return err; |
548 | } | 554 | } |
549 | 555 | ||
550 | void do_thaw_all(unsigned long unused) | 556 | void do_thaw_all(struct work_struct *work) |
551 | { | 557 | { |
552 | struct super_block *sb; | 558 | struct super_block *sb; |
553 | char b[BDEVNAME_SIZE]; | 559 | char b[BDEVNAME_SIZE]; |
@@ -567,6 +573,7 @@ restart: | |||
567 | goto restart; | 573 | goto restart; |
568 | } | 574 | } |
569 | spin_unlock(&sb_lock); | 575 | spin_unlock(&sb_lock); |
576 | kfree(work); | ||
570 | printk(KERN_WARNING "Emergency Thaw complete\n"); | 577 | printk(KERN_WARNING "Emergency Thaw complete\n"); |
571 | } | 578 | } |
572 | 579 | ||
@@ -577,7 +584,13 @@ restart: | |||
577 | */ | 584 | */ |
578 | void emergency_thaw_all(void) | 585 | void emergency_thaw_all(void) |
579 | { | 586 | { |
580 | pdflush_operation(do_thaw_all, 0); | 587 | struct work_struct *work; |
588 | |||
589 | work = kmalloc(sizeof(*work), GFP_ATOMIC); | ||
590 | if (work) { | ||
591 | INIT_WORK(work, do_thaw_all); | ||
592 | schedule_work(work); | ||
593 | } | ||
581 | } | 594 | } |
582 | 595 | ||
583 | /** | 596 | /** |
@@ -1608,7 +1621,8 @@ EXPORT_SYMBOL(unmap_underlying_metadata); | |||
1608 | * unplugging the device queue. | 1621 | * unplugging the device queue. |
1609 | */ | 1622 | */ |
1610 | static int __block_write_full_page(struct inode *inode, struct page *page, | 1623 | static int __block_write_full_page(struct inode *inode, struct page *page, |
1611 | get_block_t *get_block, struct writeback_control *wbc) | 1624 | get_block_t *get_block, struct writeback_control *wbc, |
1625 | bh_end_io_t *handler) | ||
1612 | { | 1626 | { |
1613 | int err; | 1627 | int err; |
1614 | sector_t block; | 1628 | sector_t block; |
@@ -1693,7 +1707,7 @@ static int __block_write_full_page(struct inode *inode, struct page *page, | |||
1693 | continue; | 1707 | continue; |
1694 | } | 1708 | } |
1695 | if (test_clear_buffer_dirty(bh)) { | 1709 | if (test_clear_buffer_dirty(bh)) { |
1696 | mark_buffer_async_write(bh); | 1710 | mark_buffer_async_write_endio(bh, handler); |
1697 | } else { | 1711 | } else { |
1698 | unlock_buffer(bh); | 1712 | unlock_buffer(bh); |
1699 | } | 1713 | } |
@@ -1746,7 +1760,7 @@ recover: | |||
1746 | if (buffer_mapped(bh) && buffer_dirty(bh) && | 1760 | if (buffer_mapped(bh) && buffer_dirty(bh) && |
1747 | !buffer_delay(bh)) { | 1761 | !buffer_delay(bh)) { |
1748 | lock_buffer(bh); | 1762 | lock_buffer(bh); |
1749 | mark_buffer_async_write(bh); | 1763 | mark_buffer_async_write_endio(bh, handler); |
1750 | } else { | 1764 | } else { |
1751 | /* | 1765 | /* |
1752 | * The buffer may have been set dirty during | 1766 | * The buffer may have been set dirty during |
@@ -2383,7 +2397,8 @@ block_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf, | |||
2383 | if ((page->mapping != inode->i_mapping) || | 2397 | if ((page->mapping != inode->i_mapping) || |
2384 | (page_offset(page) > size)) { | 2398 | (page_offset(page) > size)) { |
2385 | /* page got truncated out from underneath us */ | 2399 | /* page got truncated out from underneath us */ |
2386 | goto out_unlock; | 2400 | unlock_page(page); |
2401 | goto out; | ||
2387 | } | 2402 | } |
2388 | 2403 | ||
2389 | /* page is wholly or partially inside EOF */ | 2404 | /* page is wholly or partially inside EOF */ |
@@ -2397,14 +2412,15 @@ block_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf, | |||
2397 | ret = block_commit_write(page, 0, end); | 2412 | ret = block_commit_write(page, 0, end); |
2398 | 2413 | ||
2399 | if (unlikely(ret)) { | 2414 | if (unlikely(ret)) { |
2415 | unlock_page(page); | ||
2400 | if (ret == -ENOMEM) | 2416 | if (ret == -ENOMEM) |
2401 | ret = VM_FAULT_OOM; | 2417 | ret = VM_FAULT_OOM; |
2402 | else /* -ENOSPC, -EIO, etc */ | 2418 | else /* -ENOSPC, -EIO, etc */ |
2403 | ret = VM_FAULT_SIGBUS; | 2419 | ret = VM_FAULT_SIGBUS; |
2404 | } | 2420 | } else |
2421 | ret = VM_FAULT_LOCKED; | ||
2405 | 2422 | ||
2406 | out_unlock: | 2423 | out: |
2407 | unlock_page(page); | ||
2408 | return ret; | 2424 | return ret; |
2409 | } | 2425 | } |
2410 | 2426 | ||
@@ -2672,7 +2688,8 @@ int nobh_writepage(struct page *page, get_block_t *get_block, | |||
2672 | out: | 2688 | out: |
2673 | ret = mpage_writepage(page, get_block, wbc); | 2689 | ret = mpage_writepage(page, get_block, wbc); |
2674 | if (ret == -EAGAIN) | 2690 | if (ret == -EAGAIN) |
2675 | ret = __block_write_full_page(inode, page, get_block, wbc); | 2691 | ret = __block_write_full_page(inode, page, get_block, wbc, |
2692 | end_buffer_async_write); | ||
2676 | return ret; | 2693 | return ret; |
2677 | } | 2694 | } |
2678 | EXPORT_SYMBOL(nobh_writepage); | 2695 | EXPORT_SYMBOL(nobh_writepage); |
@@ -2830,9 +2847,10 @@ out: | |||
2830 | 2847 | ||
2831 | /* | 2848 | /* |
2832 | * The generic ->writepage function for buffer-backed address_spaces | 2849 | * The generic ->writepage function for buffer-backed address_spaces |
2850 | * this form passes in the end_io handler used to finish the IO. | ||
2833 | */ | 2851 | */ |
2834 | int block_write_full_page(struct page *page, get_block_t *get_block, | 2852 | int block_write_full_page_endio(struct page *page, get_block_t *get_block, |
2835 | struct writeback_control *wbc) | 2853 | struct writeback_control *wbc, bh_end_io_t *handler) |
2836 | { | 2854 | { |
2837 | struct inode * const inode = page->mapping->host; | 2855 | struct inode * const inode = page->mapping->host; |
2838 | loff_t i_size = i_size_read(inode); | 2856 | loff_t i_size = i_size_read(inode); |
@@ -2841,7 +2859,8 @@ int block_write_full_page(struct page *page, get_block_t *get_block, | |||
2841 | 2859 | ||
2842 | /* Is the page fully inside i_size? */ | 2860 | /* Is the page fully inside i_size? */ |
2843 | if (page->index < end_index) | 2861 | if (page->index < end_index) |
2844 | return __block_write_full_page(inode, page, get_block, wbc); | 2862 | return __block_write_full_page(inode, page, get_block, wbc, |
2863 | handler); | ||
2845 | 2864 | ||
2846 | /* Is the page fully outside i_size? (truncate in progress) */ | 2865 | /* Is the page fully outside i_size? (truncate in progress) */ |
2847 | offset = i_size & (PAGE_CACHE_SIZE-1); | 2866 | offset = i_size & (PAGE_CACHE_SIZE-1); |
@@ -2864,9 +2883,20 @@ int block_write_full_page(struct page *page, get_block_t *get_block, | |||
2864 | * writes to that region are not written out to the file." | 2883 | * writes to that region are not written out to the file." |
2865 | */ | 2884 | */ |
2866 | zero_user_segment(page, offset, PAGE_CACHE_SIZE); | 2885 | zero_user_segment(page, offset, PAGE_CACHE_SIZE); |
2867 | return __block_write_full_page(inode, page, get_block, wbc); | 2886 | return __block_write_full_page(inode, page, get_block, wbc, handler); |
2887 | } | ||
2888 | |||
2889 | /* | ||
2890 | * The generic ->writepage function for buffer-backed address_spaces | ||
2891 | */ | ||
2892 | int block_write_full_page(struct page *page, get_block_t *get_block, | ||
2893 | struct writeback_control *wbc) | ||
2894 | { | ||
2895 | return block_write_full_page_endio(page, get_block, wbc, | ||
2896 | end_buffer_async_write); | ||
2868 | } | 2897 | } |
2869 | 2898 | ||
2899 | |||
2870 | sector_t generic_block_bmap(struct address_space *mapping, sector_t block, | 2900 | sector_t generic_block_bmap(struct address_space *mapping, sector_t block, |
2871 | get_block_t *get_block) | 2901 | get_block_t *get_block) |
2872 | { | 2902 | { |
@@ -3335,9 +3365,11 @@ EXPORT_SYMBOL(block_read_full_page); | |||
3335 | EXPORT_SYMBOL(block_sync_page); | 3365 | EXPORT_SYMBOL(block_sync_page); |
3336 | EXPORT_SYMBOL(block_truncate_page); | 3366 | EXPORT_SYMBOL(block_truncate_page); |
3337 | EXPORT_SYMBOL(block_write_full_page); | 3367 | EXPORT_SYMBOL(block_write_full_page); |
3368 | EXPORT_SYMBOL(block_write_full_page_endio); | ||
3338 | EXPORT_SYMBOL(cont_write_begin); | 3369 | EXPORT_SYMBOL(cont_write_begin); |
3339 | EXPORT_SYMBOL(end_buffer_read_sync); | 3370 | EXPORT_SYMBOL(end_buffer_read_sync); |
3340 | EXPORT_SYMBOL(end_buffer_write_sync); | 3371 | EXPORT_SYMBOL(end_buffer_write_sync); |
3372 | EXPORT_SYMBOL(end_buffer_async_write); | ||
3341 | EXPORT_SYMBOL(file_fsync); | 3373 | EXPORT_SYMBOL(file_fsync); |
3342 | EXPORT_SYMBOL(generic_block_bmap); | 3374 | EXPORT_SYMBOL(generic_block_bmap); |
3343 | EXPORT_SYMBOL(generic_cont_expand_simple); | 3375 | EXPORT_SYMBOL(generic_cont_expand_simple); |