Diffstat (limited to 'fs/buffer.c')
 fs/buffer.c | 56 ++++++++++++++++++++++++++++++++++++++++++-----------
 1 file changed, 45 insertions(+), 11 deletions(-)
diff --git a/fs/buffer.c b/fs/buffer.c
index a2fd743d97cb..f5f8b15a6e40 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -290,7 +290,7 @@ static void free_more_memory(void)
 						&zone);
 		if (zone)
 			try_to_free_pages(node_zonelist(nid, GFP_NOFS), 0,
-						GFP_NOFS);
+						GFP_NOFS, NULL);
 	}
 }
 
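
This hunk follows a tree-wide signature change: try_to_free_pages() now takes an explicit nodemask, and this caller passes NULL to mean "reclaim from any allowed node". A minimal sketch of the new prototype plus a caller that does restrict reclaim to one node; reclaim_from_node() is a hypothetical illustration, not part of this commit:

	/* New prototype: direct reclaim can be limited to a nodemask;
	 * NULL (as used by free_more_memory() above) means no restriction. */
	unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
					gfp_t gfp_mask, nodemask_t *nodemask);

	/* Hypothetical caller confining reclaim to a single node. */
	static void reclaim_from_node(int nid)
	{
		nodemask_t mask;

		nodes_clear(mask);
		node_set(nid, mask);
		try_to_free_pages(node_zonelist(nid, GFP_NOFS), 0,
				  GFP_NOFS, &mask);
	}
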
@@ -547,6 +547,39 @@ repeat:
 	return err;
 }
 
+void do_thaw_all(unsigned long unused)
+{
+	struct super_block *sb;
+	char b[BDEVNAME_SIZE];
+
+	spin_lock(&sb_lock);
+restart:
+	list_for_each_entry(sb, &super_blocks, s_list) {
+		sb->s_count++;
+		spin_unlock(&sb_lock);
+		down_read(&sb->s_umount);
+		while (sb->s_bdev && !thaw_bdev(sb->s_bdev, sb))
+			printk(KERN_WARNING "Emergency Thaw on %s\n",
+			       bdevname(sb->s_bdev, b));
+		up_read(&sb->s_umount);
+		spin_lock(&sb_lock);
+		if (__put_super_and_need_restart(sb))
+			goto restart;
+	}
+	spin_unlock(&sb_lock);
+	printk(KERN_WARNING "Emergency Thaw complete\n");
+}
+
+/**
+ * emergency_thaw_all -- forcibly thaw every frozen filesystem
+ *
+ * Used for emergency unfreeze of all filesystems via SysRq
+ */
+void emergency_thaw_all(void)
+{
+	pdflush_operation(do_thaw_all, 0);
+}
+
 /**
  * sync_mapping_buffers - write out & wait upon a mapping's "associated" buffers
  * @mapping: the mapping which wants those buffers written
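
The added do_thaw_all() walks every superblock, bumping s_count so the entry stays valid while sb_lock is dropped, and calls thaw_bdev() in a loop because freezes nest: a device frozen several times must be thawed the same number of times. emergency_thaw_all() defers the walk to a pdflush thread so it is safe to trigger from atomic context. A hedged sketch of how a SysRq handler might hook this up; the key, op, and handler names are assumptions for illustration, not part of this diff:

	/* Hypothetical SysRq wiring for the emergency thaw path. */
	static void sysrq_handle_thaw(int key, struct tty_struct *tty)
	{
		emergency_thaw_all();
	}

	static struct sysrq_key_op sysrq_thaw_op = {
		.handler	= sysrq_handle_thaw,
		.help_msg	= "thaw-filesystems(J)",
		.action_msg	= "Emergency Thaw of all frozen filesystems",
	};
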
@@ -621,14 +654,7 @@ static void __set_page_dirty(struct page *page,
 	spin_lock_irq(&mapping->tree_lock);
 	if (page->mapping) {	/* Race with truncate? */
 		WARN_ON_ONCE(warn && !PageUptodate(page));
-
-		if (mapping_cap_account_dirty(mapping)) {
-			__inc_zone_page_state(page, NR_FILE_DIRTY);
-			__inc_bdi_stat(mapping->backing_dev_info,
-					BDI_RECLAIMABLE);
-			task_dirty_inc(current);
-			task_io_account_write(PAGE_CACHE_SIZE);
-		}
+		account_page_dirtied(page, mapping);
 		radix_tree_tag_set(&mapping->page_tree,
 				page_index(page), PAGECACHE_TAG_DIRTY);
 	}
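
The open-coded dirty accounting (per-zone and per-BDI counters, per-task dirty throttling, and I/O write accounting) is folded into a single helper so other dirtying paths can share it. A sketch of that helper, reconstructed from the block removed above; the authoritative definition lives in mm, not in this file:

	void account_page_dirtied(struct page *page,
				  struct address_space *mapping)
	{
		if (mapping_cap_account_dirty(mapping)) {
			__inc_zone_page_state(page, NR_FILE_DIRTY);
			__inc_bdi_stat(mapping->backing_dev_info,
					BDI_RECLAIMABLE);
			task_dirty_inc(current);
			task_io_account_write(PAGE_CACHE_SIZE);
		}
	}
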
@@ -2320,13 +2346,14 @@ int block_commit_write(struct page *page, unsigned from, unsigned to)
  * unlock the page.
  */
 int
-block_page_mkwrite(struct vm_area_struct *vma, struct page *page,
+block_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf,
 		   get_block_t get_block)
 {
+	struct page *page = vmf->page;
 	struct inode *inode = vma->vm_file->f_path.dentry->d_inode;
 	unsigned long end;
 	loff_t size;
-	int ret = -EINVAL;
+	int ret = VM_FAULT_NOPAGE; /* make the VM retry the fault */
 
 	lock_page(page);
 	size = i_size_read(inode);
@@ -2346,6 +2373,13 @@ block_page_mkwrite(struct vm_area_struct *vma, struct page *page,
 	if (!ret)
 		ret = block_commit_write(page, 0, end);
 
+	if (unlikely(ret)) {
+		if (ret == -ENOMEM)
+			ret = VM_FAULT_OOM;
+		else /* -ENOSPC, -EIO, etc */
+			ret = VM_FAULT_SIGBUS;
+	}
+
 out_unlock:
 	unlock_page(page);
 	return ret;
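
With this change block_page_mkwrite() matches the ->page_mkwrite prototype and returns VM_FAULT_* codes directly: VM_FAULT_NOPAGE when the page was truncated out from under the fault (so the VM retries), VM_FAULT_OOM for -ENOMEM, and VM_FAULT_SIGBUS for any other error. A filesystem's ->page_mkwrite can therefore just forward to it; a minimal sketch under the new prototype, with the myfs_* names as hypothetical placeholders:

	/* Hypothetical filesystem glue for the new prototype. */
	static int myfs_page_mkwrite(struct vm_area_struct *vma,
				     struct vm_fault *vmf)
	{
		return block_page_mkwrite(vma, vmf, myfs_get_block);
	}

	static struct vm_operations_struct myfs_file_vm_ops = {
		.fault		= filemap_fault,
		.page_mkwrite	= myfs_page_mkwrite,
	};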