aboutsummaryrefslogtreecommitdiffstats
path: root/fs/buffer.c
diff options
context:
space:
mode:
authorIngo Molnar <mingo@elte.hu>2009-04-14 05:32:23 -0400
committerIngo Molnar <mingo@elte.hu>2009-04-14 05:32:30 -0400
commit05cfbd66d07c44865983c8b65ae9d0037d874206 (patch)
tree084b665cc97b47d1592fe76ea0a39a7753288a02 /fs/buffer.c
parent31c9a24ec82926fcae49483e53566d231e705057 (diff)
parentef631b0ca01655d24e9ca7e199262c4a46416a26 (diff)
Merge branch 'core/urgent' into core/rcu
Merge reason: new patches to be queued up depend on: ef631b0: rcu: Make hierarchical RCU less IPI-happy Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'fs/buffer.c')
-rw-r--r--fs/buffer.c101
1 files changed, 79 insertions, 22 deletions
diff --git a/fs/buffer.c b/fs/buffer.c
index a2fd743d97cb..13edf7ad3ff1 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -199,13 +199,13 @@ __find_get_block_slow(struct block_device *bdev, sector_t block)
199 head = page_buffers(page); 199 head = page_buffers(page);
200 bh = head; 200 bh = head;
201 do { 201 do {
202 if (bh->b_blocknr == block) { 202 if (!buffer_mapped(bh))
203 all_mapped = 0;
204 else if (bh->b_blocknr == block) {
203 ret = bh; 205 ret = bh;
204 get_bh(bh); 206 get_bh(bh);
205 goto out_unlock; 207 goto out_unlock;
206 } 208 }
207 if (!buffer_mapped(bh))
208 all_mapped = 0;
209 bh = bh->b_this_page; 209 bh = bh->b_this_page;
210 } while (bh != head); 210 } while (bh != head);
211 211
@@ -290,7 +290,7 @@ static void free_more_memory(void)
290 &zone); 290 &zone);
291 if (zone) 291 if (zone)
292 try_to_free_pages(node_zonelist(nid, GFP_NOFS), 0, 292 try_to_free_pages(node_zonelist(nid, GFP_NOFS), 0,
293 GFP_NOFS); 293 GFP_NOFS, NULL);
294 } 294 }
295} 295}
296 296
@@ -547,6 +547,39 @@ repeat:
547 return err; 547 return err;
548} 548}
549 549
550void do_thaw_all(unsigned long unused)
551{
552 struct super_block *sb;
553 char b[BDEVNAME_SIZE];
554
555 spin_lock(&sb_lock);
556restart:
557 list_for_each_entry(sb, &super_blocks, s_list) {
558 sb->s_count++;
559 spin_unlock(&sb_lock);
560 down_read(&sb->s_umount);
561 while (sb->s_bdev && !thaw_bdev(sb->s_bdev, sb))
562 printk(KERN_WARNING "Emergency Thaw on %s\n",
563 bdevname(sb->s_bdev, b));
564 up_read(&sb->s_umount);
565 spin_lock(&sb_lock);
566 if (__put_super_and_need_restart(sb))
567 goto restart;
568 }
569 spin_unlock(&sb_lock);
570 printk(KERN_WARNING "Emergency Thaw complete\n");
571}
572
573/**
574 * emergency_thaw_all -- forcibly thaw every frozen filesystem
575 *
576 * Used for emergency unfreeze of all filesystems via SysRq
577 */
578void emergency_thaw_all(void)
579{
580 pdflush_operation(do_thaw_all, 0);
581}
582
550/** 583/**
551 * sync_mapping_buffers - write out & wait upon a mapping's "associated" buffers 584 * sync_mapping_buffers - write out & wait upon a mapping's "associated" buffers
552 * @mapping: the mapping which wants those buffers written 585 * @mapping: the mapping which wants those buffers written
@@ -621,14 +654,7 @@ static void __set_page_dirty(struct page *page,
621 spin_lock_irq(&mapping->tree_lock); 654 spin_lock_irq(&mapping->tree_lock);
622 if (page->mapping) { /* Race with truncate? */ 655 if (page->mapping) { /* Race with truncate? */
623 WARN_ON_ONCE(warn && !PageUptodate(page)); 656 WARN_ON_ONCE(warn && !PageUptodate(page));
624 657 account_page_dirtied(page, mapping);
625 if (mapping_cap_account_dirty(mapping)) {
626 __inc_zone_page_state(page, NR_FILE_DIRTY);
627 __inc_bdi_stat(mapping->backing_dev_info,
628 BDI_RECLAIMABLE);
629 task_dirty_inc(current);
630 task_io_account_write(PAGE_CACHE_SIZE);
631 }
632 radix_tree_tag_set(&mapping->page_tree, 658 radix_tree_tag_set(&mapping->page_tree,
633 page_index(page), PAGECACHE_TAG_DIRTY); 659 page_index(page), PAGECACHE_TAG_DIRTY);
634 } 660 }
@@ -711,7 +737,7 @@ static int fsync_buffers_list(spinlock_t *lock, struct list_head *list)
711{ 737{
712 struct buffer_head *bh; 738 struct buffer_head *bh;
713 struct list_head tmp; 739 struct list_head tmp;
714 struct address_space *mapping; 740 struct address_space *mapping, *prev_mapping = NULL;
715 int err = 0, err2; 741 int err = 0, err2;
716 742
717 INIT_LIST_HEAD(&tmp); 743 INIT_LIST_HEAD(&tmp);
@@ -736,7 +762,18 @@ static int fsync_buffers_list(spinlock_t *lock, struct list_head *list)
736 * contents - it is a noop if I/O is still in 762 * contents - it is a noop if I/O is still in
737 * flight on potentially older contents. 763 * flight on potentially older contents.
738 */ 764 */
739 ll_rw_block(SWRITE_SYNC, 1, &bh); 765 ll_rw_block(SWRITE_SYNC_PLUG, 1, &bh);
766
767 /*
768 * Kick off IO for the previous mapping. Note
769 * that we will not run the very last mapping,
770 * wait_on_buffer() will do that for us
771 * through sync_buffer().
772 */
773 if (prev_mapping && prev_mapping != mapping)
774 blk_run_address_space(prev_mapping);
775 prev_mapping = mapping;
776
740 brelse(bh); 777 brelse(bh);
741 spin_lock(lock); 778 spin_lock(lock);
742 } 779 }
@@ -1559,6 +1596,16 @@ EXPORT_SYMBOL(unmap_underlying_metadata);
1559 * locked buffer. This only can happen if someone has written the buffer 1596 * locked buffer. This only can happen if someone has written the buffer
1560 * directly, with submit_bh(). At the address_space level PageWriteback 1597 * directly, with submit_bh(). At the address_space level PageWriteback
1561 * prevents this contention from occurring. 1598 * prevents this contention from occurring.
1599 *
1600 * If block_write_full_page() is called with wbc->sync_mode ==
1601 * WB_SYNC_ALL, the writes are posted using WRITE_SYNC_PLUG; this
1602 * causes the writes to be flagged as synchronous writes, but the
1603 * block device queue will NOT be unplugged, since usually many pages
1604 * will be pushed to the queue before the higher-level caller actually 1651
1605 * waits for the writes to be completed. The various wait functions,
1606 * such as wait_on_writeback_range() will ultimately call sync_page()
1607 * which will ultimately call blk_run_backing_dev(), which will end up
1608 * unplugging the device queue.
1562 */ 1609 */
1563static int __block_write_full_page(struct inode *inode, struct page *page, 1610static int __block_write_full_page(struct inode *inode, struct page *page,
1564 get_block_t *get_block, struct writeback_control *wbc) 1611 get_block_t *get_block, struct writeback_control *wbc)
@@ -1569,6 +1616,8 @@ static int __block_write_full_page(struct inode *inode, struct page *page,
1569 struct buffer_head *bh, *head; 1616 struct buffer_head *bh, *head;
1570 const unsigned blocksize = 1 << inode->i_blkbits; 1617 const unsigned blocksize = 1 << inode->i_blkbits;
1571 int nr_underway = 0; 1618 int nr_underway = 0;
1619 int write_op = (wbc->sync_mode == WB_SYNC_ALL ?
1620 WRITE_SYNC_PLUG : WRITE);
1572 1621
1573 BUG_ON(!PageLocked(page)); 1622 BUG_ON(!PageLocked(page));
1574 1623
@@ -1660,7 +1709,7 @@ static int __block_write_full_page(struct inode *inode, struct page *page,
1660 do { 1709 do {
1661 struct buffer_head *next = bh->b_this_page; 1710 struct buffer_head *next = bh->b_this_page;
1662 if (buffer_async_write(bh)) { 1711 if (buffer_async_write(bh)) {
1663 submit_bh(WRITE, bh); 1712 submit_bh(write_op, bh);
1664 nr_underway++; 1713 nr_underway++;
1665 } 1714 }
1666 bh = next; 1715 bh = next;
@@ -1714,7 +1763,7 @@ recover:
1714 struct buffer_head *next = bh->b_this_page; 1763 struct buffer_head *next = bh->b_this_page;
1715 if (buffer_async_write(bh)) { 1764 if (buffer_async_write(bh)) {
1716 clear_buffer_dirty(bh); 1765 clear_buffer_dirty(bh);
1717 submit_bh(WRITE, bh); 1766 submit_bh(write_op, bh);
1718 nr_underway++; 1767 nr_underway++;
1719 } 1768 }
1720 bh = next; 1769 bh = next;
@@ -2320,13 +2369,14 @@ int block_commit_write(struct page *page, unsigned from, unsigned to)
2320 * unlock the page. 2369 * unlock the page.
2321 */ 2370 */
2322int 2371int
2323block_page_mkwrite(struct vm_area_struct *vma, struct page *page, 2372block_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf,
2324 get_block_t get_block) 2373 get_block_t get_block)
2325{ 2374{
2375 struct page *page = vmf->page;
2326 struct inode *inode = vma->vm_file->f_path.dentry->d_inode; 2376 struct inode *inode = vma->vm_file->f_path.dentry->d_inode;
2327 unsigned long end; 2377 unsigned long end;
2328 loff_t size; 2378 loff_t size;
2329 int ret = -EINVAL; 2379 int ret = VM_FAULT_NOPAGE; /* make the VM retry the fault */
2330 2380
2331 lock_page(page); 2381 lock_page(page);
2332 size = i_size_read(inode); 2382 size = i_size_read(inode);
@@ -2346,6 +2396,13 @@ block_page_mkwrite(struct vm_area_struct *vma, struct page *page,
2346 if (!ret) 2396 if (!ret)
2347 ret = block_commit_write(page, 0, end); 2397 ret = block_commit_write(page, 0, end);
2348 2398
2399 if (unlikely(ret)) {
2400 if (ret == -ENOMEM)
2401 ret = VM_FAULT_OOM;
2402 else /* -ENOSPC, -EIO, etc */
2403 ret = VM_FAULT_SIGBUS;
2404 }
2405
2349out_unlock: 2406out_unlock:
2350 unlock_page(page); 2407 unlock_page(page);
2351 return ret; 2408 return ret;
@@ -2922,12 +2979,13 @@ void ll_rw_block(int rw, int nr, struct buffer_head *bhs[])
2922 for (i = 0; i < nr; i++) { 2979 for (i = 0; i < nr; i++) {
2923 struct buffer_head *bh = bhs[i]; 2980 struct buffer_head *bh = bhs[i];
2924 2981
2925 if (rw == SWRITE || rw == SWRITE_SYNC) 2982 if (rw == SWRITE || rw == SWRITE_SYNC || rw == SWRITE_SYNC_PLUG)
2926 lock_buffer(bh); 2983 lock_buffer(bh);
2927 else if (!trylock_buffer(bh)) 2984 else if (!trylock_buffer(bh))
2928 continue; 2985 continue;
2929 2986
2930 if (rw == WRITE || rw == SWRITE || rw == SWRITE_SYNC) { 2987 if (rw == WRITE || rw == SWRITE || rw == SWRITE_SYNC ||
2988 rw == SWRITE_SYNC_PLUG) {
2931 if (test_clear_buffer_dirty(bh)) { 2989 if (test_clear_buffer_dirty(bh)) {
2932 bh->b_end_io = end_buffer_write_sync; 2990 bh->b_end_io = end_buffer_write_sync;
2933 get_bh(bh); 2991 get_bh(bh);
@@ -2963,7 +3021,7 @@ int sync_dirty_buffer(struct buffer_head *bh)
2963 if (test_clear_buffer_dirty(bh)) { 3021 if (test_clear_buffer_dirty(bh)) {
2964 get_bh(bh); 3022 get_bh(bh);
2965 bh->b_end_io = end_buffer_write_sync; 3023 bh->b_end_io = end_buffer_write_sync;
2966 ret = submit_bh(WRITE, bh); 3024 ret = submit_bh(WRITE_SYNC, bh);
2967 wait_on_buffer(bh); 3025 wait_on_buffer(bh);
2968 if (buffer_eopnotsupp(bh)) { 3026 if (buffer_eopnotsupp(bh)) {
2969 clear_buffer_eopnotsupp(bh); 3027 clear_buffer_eopnotsupp(bh);
@@ -3281,7 +3339,6 @@ EXPORT_SYMBOL(cont_write_begin);
3281EXPORT_SYMBOL(end_buffer_read_sync); 3339EXPORT_SYMBOL(end_buffer_read_sync);
3282EXPORT_SYMBOL(end_buffer_write_sync); 3340EXPORT_SYMBOL(end_buffer_write_sync);
3283EXPORT_SYMBOL(file_fsync); 3341EXPORT_SYMBOL(file_fsync);
3284EXPORT_SYMBOL(fsync_bdev);
3285EXPORT_SYMBOL(generic_block_bmap); 3342EXPORT_SYMBOL(generic_block_bmap);
3286EXPORT_SYMBOL(generic_cont_expand_simple); 3343EXPORT_SYMBOL(generic_cont_expand_simple);
3287EXPORT_SYMBOL(init_buffer); 3344EXPORT_SYMBOL(init_buffer);