about summary refs log tree commit diff stats
path: root/fs/buffer.c
diff options
context:
space:
mode:
authorIngo Molnar <mingo@elte.hu>2008-08-18 21:34:07 -0400
committerIngo Molnar <mingo@elte.hu>2008-08-18 21:34:07 -0400
commit2879a927bb7a3cf91ae3906a5e59215f9c17dd75 (patch)
tree870bdd1bd530a3d5d2abd10539700446b2878188 /fs/buffer.c
parent7e7b43892b87b6be259479ef4de14029dcb4012f (diff)
parent20211e4d344729f4d4c93da37a590fc1c3a1fd9b (diff)
Merge branch 'x86/oprofile' into oprofile
Diffstat (limited to 'fs/buffer.c')
-rw-r--r--fs/buffer.c61
1 file changed, 53 insertions, 8 deletions
diff --git a/fs/buffer.c b/fs/buffer.c
index d48caee12e2a..38653e36e225 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -580,7 +580,7 @@ EXPORT_SYMBOL(mark_buffer_async_write);
580/* 580/*
581 * The buffer's backing address_space's private_lock must be held 581 * The buffer's backing address_space's private_lock must be held
582 */ 582 */
583static inline void __remove_assoc_queue(struct buffer_head *bh) 583static void __remove_assoc_queue(struct buffer_head *bh)
584{ 584{
585 list_del_init(&bh->b_assoc_buffers); 585 list_del_init(&bh->b_assoc_buffers);
586 WARN_ON(!bh->b_assoc_map); 586 WARN_ON(!bh->b_assoc_map);
@@ -706,7 +706,7 @@ static int __set_page_dirty(struct page *page,
706 if (TestSetPageDirty(page)) 706 if (TestSetPageDirty(page))
707 return 0; 707 return 0;
708 708
709 write_lock_irq(&mapping->tree_lock); 709 spin_lock_irq(&mapping->tree_lock);
710 if (page->mapping) { /* Race with truncate? */ 710 if (page->mapping) { /* Race with truncate? */
711 WARN_ON_ONCE(warn && !PageUptodate(page)); 711 WARN_ON_ONCE(warn && !PageUptodate(page));
712 712
@@ -719,7 +719,7 @@ static int __set_page_dirty(struct page *page,
719 radix_tree_tag_set(&mapping->page_tree, 719 radix_tree_tag_set(&mapping->page_tree,
720 page_index(page), PAGECACHE_TAG_DIRTY); 720 page_index(page), PAGECACHE_TAG_DIRTY);
721 } 721 }
722 write_unlock_irq(&mapping->tree_lock); 722 spin_unlock_irq(&mapping->tree_lock);
723 __mark_inode_dirty(mapping->host, I_DIRTY_PAGES); 723 __mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
724 724
725 return 1; 725 return 1;
@@ -1214,8 +1214,7 @@ void __brelse(struct buffer_head * buf)
1214 put_bh(buf); 1214 put_bh(buf);
1215 return; 1215 return;
1216 } 1216 }
1217 printk(KERN_ERR "VFS: brelse: Trying to free free buffer\n"); 1217 WARN(1, KERN_ERR "VFS: brelse: Trying to free free buffer\n");
1218 WARN_ON(1);
1219} 1218}
1220 1219
1221/* 1220/*
@@ -1721,7 +1720,7 @@ static int __block_write_full_page(struct inode *inode, struct page *page,
1721 */ 1720 */
1722 if (wbc->sync_mode != WB_SYNC_NONE || !wbc->nonblocking) { 1721 if (wbc->sync_mode != WB_SYNC_NONE || !wbc->nonblocking) {
1723 lock_buffer(bh); 1722 lock_buffer(bh);
1724 } else if (test_set_buffer_locked(bh)) { 1723 } else if (!trylock_buffer(bh)) {
1725 redirty_page_for_writepage(wbc, page); 1724 redirty_page_for_writepage(wbc, page);
1726 continue; 1725 continue;
1727 } 1726 }
@@ -2097,6 +2096,52 @@ int generic_write_end(struct file *file, struct address_space *mapping,
2097EXPORT_SYMBOL(generic_write_end); 2096EXPORT_SYMBOL(generic_write_end);
2098 2097
2099/* 2098/*
2099 * block_is_partially_uptodate checks whether buffers within a page are
2100 * uptodate or not.
2101 *
2102 * Returns true if all buffers which correspond to a file portion
2103 * we want to read are uptodate.
2104 */
2105int block_is_partially_uptodate(struct page *page, read_descriptor_t *desc,
2106 unsigned long from)
2107{
2108 struct inode *inode = page->mapping->host;
2109 unsigned block_start, block_end, blocksize;
2110 unsigned to;
2111 struct buffer_head *bh, *head;
2112 int ret = 1;
2113
2114 if (!page_has_buffers(page))
2115 return 0;
2116
2117 blocksize = 1 << inode->i_blkbits;
2118 to = min_t(unsigned, PAGE_CACHE_SIZE - from, desc->count);
2119 to = from + to;
2120 if (from < blocksize && to > PAGE_CACHE_SIZE - blocksize)
2121 return 0;
2122
2123 head = page_buffers(page);
2124 bh = head;
2125 block_start = 0;
2126 do {
2127 block_end = block_start + blocksize;
2128 if (block_end > from && block_start < to) {
2129 if (!buffer_uptodate(bh)) {
2130 ret = 0;
2131 break;
2132 }
2133 if (block_end >= to)
2134 break;
2135 }
2136 block_start = block_end;
2137 bh = bh->b_this_page;
2138 } while (bh != head);
2139
2140 return ret;
2141}
2142EXPORT_SYMBOL(block_is_partially_uptodate);
2143
2144/*
2100 * Generic "read page" function for block devices that have the normal 2145 * Generic "read page" function for block devices that have the normal
2101 * get_block functionality. This is most of the block device filesystems. 2146 * get_block functionality. This is most of the block device filesystems.
2102 * Reads the page asynchronously --- the unlock_buffer() and 2147 * Reads the page asynchronously --- the unlock_buffer() and
@@ -2955,7 +3000,7 @@ void ll_rw_block(int rw, int nr, struct buffer_head *bhs[])
2955 3000
2956 if (rw == SWRITE || rw == SWRITE_SYNC) 3001 if (rw == SWRITE || rw == SWRITE_SYNC)
2957 lock_buffer(bh); 3002 lock_buffer(bh);
2958 else if (test_set_buffer_locked(bh)) 3003 else if (!trylock_buffer(bh))
2959 continue; 3004 continue;
2960 3005
2961 if (rw == WRITE || rw == SWRITE || rw == SWRITE_SYNC) { 3006 if (rw == WRITE || rw == SWRITE || rw == SWRITE_SYNC) {
@@ -3272,7 +3317,7 @@ int bh_submit_read(struct buffer_head *bh)
3272EXPORT_SYMBOL(bh_submit_read); 3317EXPORT_SYMBOL(bh_submit_read);
3273 3318
3274static void 3319static void
3275init_buffer_head(struct kmem_cache *cachep, void *data) 3320init_buffer_head(void *data)
3276{ 3321{
3277 struct buffer_head *bh = data; 3322 struct buffer_head *bh = data;
3278 3323