Diffstat (limited to 'fs/buffer.c')
-rw-r--r--	fs/buffer.c	47
1 file changed, 25 insertions(+), 22 deletions(-)
diff --git a/fs/buffer.c b/fs/buffer.c
index 3b3ab5281920..23f1f3a68077 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -426,8 +426,10 @@ __find_get_block_slow(struct block_device *bdev, sector_t block)
 	if (all_mapped) {
 		printk("__find_get_block_slow() failed. "
 			"block=%llu, b_blocknr=%llu\n",
-			(unsigned long long)block, (unsigned long long)bh->b_blocknr);
-		printk("b_state=0x%08lx, b_size=%u\n", bh->b_state, bh->b_size);
+			(unsigned long long)block,
+			(unsigned long long)bh->b_blocknr);
+		printk("b_state=0x%08lx, b_size=%zu\n",
+			bh->b_state, bh->b_size);
 		printk("device blocksize: %d\n", 1 << bd_inode->i_blkbits);
 	}
 out_unlock:
@@ -491,7 +493,7 @@ static void free_more_memory(void)
 	wakeup_pdflush(1024);
 	yield();
 
-	for_each_pgdat(pgdat) {
+	for_each_online_pgdat(pgdat) {
 		zones = pgdat->node_zonelists[gfp_zone(GFP_NOFS)].zones;
 		if (*zones)
 			try_to_free_pages(zones, GFP_NOFS);
@@ -796,8 +798,7 @@ void mark_buffer_dirty_inode(struct buffer_head *bh, struct inode *inode)
 	if (!mapping->assoc_mapping) {
 		mapping->assoc_mapping = buffer_mapping;
 	} else {
-		if (mapping->assoc_mapping != buffer_mapping)
-			BUG();
+		BUG_ON(mapping->assoc_mapping != buffer_mapping);
 	}
 	if (list_empty(&bh->b_assoc_buffers)) {
 		spin_lock(&buffer_mapping->private_lock);
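Note: this hunk and the two that follow replace open-coded "if (...) BUG();" checks with BUG_ON(). As a minimal userspace sketch (not kernel code; the BUG() and BUG_ON() macros below are stand-ins that only approximate the kernel's, which also add branch-prediction hints), the two forms behave identically:

	#include <stdio.h>
	#include <stdlib.h>

	/* Userspace stand-ins for the kernel macros, for illustration only. */
	#define BUG()        do { fprintf(stderr, "BUG at %s:%d\n", __FILE__, __LINE__); abort(); } while (0)
	#define BUG_ON(cond) do { if (cond) BUG(); } while (0)

	int main(void)
	{
		void *assoc_mapping = (void *)1, *buffer_mapping = (void *)1;

		/* Open-coded form that the patch removes ... */
		if (assoc_mapping != buffer_mapping)
			BUG();

		/* ... and the single-line form it adds; behaviour is the same. */
		BUG_ON(assoc_mapping != buffer_mapping);

		printf("mappings agree, no BUG\n");
		return 0;
	}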
@@ -1114,8 +1115,7 @@ grow_dev_page(struct block_device *bdev, sector_t block,
 	if (!page)
 		return NULL;
 
-	if (!PageLocked(page))
-		BUG();
+	BUG_ON(!PageLocked(page));
 
 	if (page_has_buffers(page)) {
 		bh = page_buffers(page);
@@ -1522,8 +1522,7 @@ void set_bh_page(struct buffer_head *bh,
 		struct page *page, unsigned long offset)
 {
 	bh->b_page = page;
-	if (offset >= PAGE_SIZE)
-		BUG();
+	BUG_ON(offset >= PAGE_SIZE);
 	if (PageHighMem(page))
 		/*
 		 * This catches illegal uses and preserves the offset:
@@ -1593,11 +1592,10 @@ EXPORT_SYMBOL(try_to_release_page);
  * point.  Because the caller is about to free (and possibly reuse) those
  * blocks on-disk.
  */
-int block_invalidatepage(struct page *page, unsigned long offset)
+void block_invalidatepage(struct page *page, unsigned long offset)
 {
 	struct buffer_head *head, *bh, *next;
 	unsigned int curr_off = 0;
-	int ret = 1;
 
 	BUG_ON(!PageLocked(page));
 	if (!page_has_buffers(page))
@@ -1624,19 +1622,18 @@ int block_invalidatepage(struct page *page, unsigned long offset)
 	 * so real IO is not possible anymore.
 	 */
 	if (offset == 0)
-		ret = try_to_release_page(page, 0);
+		try_to_release_page(page, 0);
 out:
-	return ret;
+	return;
 }
 EXPORT_SYMBOL(block_invalidatepage);
 
-int do_invalidatepage(struct page *page, unsigned long offset)
+void do_invalidatepage(struct page *page, unsigned long offset)
 {
-	int (*invalidatepage)(struct page *, unsigned long);
-	invalidatepage = page->mapping->a_ops->invalidatepage;
-	if (invalidatepage == NULL)
-		invalidatepage = block_invalidatepage;
-	return (*invalidatepage)(page, offset);
+	void (*invalidatepage)(struct page *, unsigned long);
+	invalidatepage = page->mapping->a_ops->invalidatepage ? :
+		block_invalidatepage;
+	(*invalidatepage)(page, offset);
 }
 
 /*
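Note: the new do_invalidatepage() relies on the GNU C conditional with an omitted middle operand, "x ? : y", which evaluates x once and yields it when non-zero, falling back to y otherwise. A small sketch of the pattern, compilable with GCC or Clang; the function names here are invented for illustration:

	#include <stdio.h>

	static void fallback_invalidate(int offset) { printf("fallback(%d)\n", offset); }
	static void fs_invalidate(int offset)       { printf("fs-specific(%d)\n", offset); }

	int main(void)
	{
		void (*registered)(int) = NULL;	/* e.g. a_ops->invalidatepage left unset */
		void (*chosen)(int);

		/* GNU extension: "registered ? : fallback" picks registered when it
		 * is non-NULL, otherwise the fallback, without naming it twice. */
		chosen = registered ? : fallback_invalidate;
		chosen(0);			/* prints "fallback(0)" */

		registered = fs_invalidate;
		chosen = registered ? : fallback_invalidate;
		chosen(0);			/* prints "fs-specific(0)" */
		return 0;
	}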
@@ -1738,6 +1735,7 @@ static int __block_write_full_page(struct inode *inode, struct page *page,
 	sector_t block;
 	sector_t last_block;
 	struct buffer_head *bh, *head;
+	const unsigned blocksize = 1 << inode->i_blkbits;
 	int nr_underway = 0;
 
 	BUG_ON(!PageLocked(page));
@@ -1745,7 +1743,7 @@ static int __block_write_full_page(struct inode *inode, struct page *page,
 	last_block = (i_size_read(inode) - 1) >> inode->i_blkbits;
 
 	if (!page_has_buffers(page)) {
-		create_empty_buffers(page, 1 << inode->i_blkbits,
+		create_empty_buffers(page, blocksize,
 				(1 << BH_Dirty)|(1 << BH_Uptodate));
 	}
 
@@ -1780,6 +1778,7 @@ static int __block_write_full_page(struct inode *inode, struct page *page,
 			clear_buffer_dirty(bh);
 			set_buffer_uptodate(bh);
 		} else if (!buffer_mapped(bh) && buffer_dirty(bh)) {
+			WARN_ON(bh->b_size != blocksize);
 			err = get_block(inode, block, bh, 1);
 			if (err)
 				goto recover;
@@ -1933,6 +1932,7 @@ static int __block_prepare_write(struct inode *inode, struct page *page,
 		if (buffer_new(bh))
 			clear_buffer_new(bh);
 		if (!buffer_mapped(bh)) {
+			WARN_ON(bh->b_size != blocksize);
 			err = get_block(inode, block, bh, 1);
 			if (err)
 				break;
@@ -2088,6 +2088,7 @@ int block_read_full_page(struct page *page, get_block_t *get_block)
 
 		fully_mapped = 0;
 		if (iblock < lblock) {
+			WARN_ON(bh->b_size != blocksize);
 			err = get_block(inode, iblock, bh, 0);
 			if (err)
 				SetPageError(page);
@@ -2409,6 +2410,7 @@ int nobh_prepare_write(struct page *page, unsigned from, unsigned to,
 		create = 1;
 		if (block_start >= to)
 			create = 0;
+		map_bh.b_size = blocksize;
 		ret = get_block(inode, block_in_file + block_in_page,
 					&map_bh, create);
 		if (ret)
@@ -2669,6 +2671,7 @@ int block_truncate_page(struct address_space *mapping,
 
 	err = 0;
 	if (!buffer_mapped(bh)) {
+		WARN_ON(bh->b_size != blocksize);
 		err = get_block(inode, iblock, bh, 0);
 		if (err)
 			goto unlock;
@@ -2755,6 +2758,7 @@ sector_t generic_block_bmap(struct address_space *mapping, sector_t block,
 	struct inode *inode = mapping->host;
 	tmp.b_state = 0;
 	tmp.b_blocknr = 0;
+	tmp.b_size = 1 << inode->i_blkbits;
 	get_block(inode, block, &tmp, 0);
 	return tmp.b_blocknr;
 }
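Note: together with the WARN_ON(bh->b_size != blocksize) checks and the new map_bh.b_size / tmp.b_size assignments, the patch moves toward a convention where the caller sizes the buffer_head before calling get_block(), so a filesystem's get_block can see how much mapping is being asked for. The sketch below is a simplified userspace stand-in under that assumption; toy_get_block(), the trimmed struct layout, and the 4096-byte block size are all invented for illustration:

	#include <stdio.h>

	typedef unsigned long long sector_t;

	/* Trimmed-down stand-in for the kernel's struct buffer_head. */
	struct buffer_head {
		unsigned long	b_state;
		sector_t	b_blocknr;
		size_t		b_size;	/* set by the caller before get_block */
	};

	/* Toy get_block(): because b_size is filled in, an implementation can
	 * see how many bytes the caller wants mapped in this one call. */
	static int toy_get_block(sector_t iblock, struct buffer_head *bh, int create)
	{
		size_t max_blocks = bh->b_size >> 12;	/* assumes 4096-byte blocks */

		(void)create;
		bh->b_blocknr = 1000 + iblock;		/* pretend mapping */
		if (max_blocks > 1)
			printf("could map up to %zu blocks at once\n", max_blocks);
		return 0;
	}

	int main(void)
	{
		struct buffer_head tmp;

		/* Mirrors the patched generic_block_bmap(): zero the state and,
		 * crucially, set b_size before calling get_block. */
		tmp.b_state = 0;
		tmp.b_blocknr = 0;
		tmp.b_size = 1 << 12;			/* stands in for 1 << inode->i_blkbits */
		toy_get_block(7, &tmp, 0);
		printf("block 7 maps to %llu\n", tmp.b_blocknr);
		return 0;
	}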
@@ -3007,7 +3011,7 @@ out:
 }
 EXPORT_SYMBOL(try_to_free_buffers);
 
-int block_sync_page(struct page *page)
+void block_sync_page(struct page *page)
 {
 	struct address_space *mapping;
 
@@ -3015,7 +3019,6 @@ int block_sync_page(struct page *page)
 	mapping = page_mapping(page);
 	if (mapping)
 		blk_run_backing_dev(mapping->backing_dev_info, page);
-	return 0;
 }
 
 /*