Diffstat (limited to 'mm/page-writeback.c')
 mm/page-writeback.c | 55 ++++++++++++++++++++++++++++++++++++++++---------------
 1 file changed, 40 insertions(+), 15 deletions(-)
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index e630188ccc40..488b7088557c 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -23,6 +23,7 @@
 #include <linux/backing-dev.h>
 #include <linux/blkdev.h>
 #include <linux/mpage.h>
+#include <linux/rmap.h>
 #include <linux/percpu.h>
 #include <linux/notifier.h>
 #include <linux/smp.h>
@@ -45,7 +46,6 @@
  */
 static long ratelimit_pages = 32;
 
-static long total_pages;  /* The total number of pages in the machine. */
 static int dirty_exceeded __cacheline_aligned_in_smp;  /* Dirty mem may be over limit */
 
 /*
@@ -125,7 +125,7 @@ get_dirty_limits(long *pbackground, long *pdirty,
     int unmapped_ratio;
     long background;
     long dirty;
-    unsigned long available_memory = total_pages;
+    unsigned long available_memory = vm_total_pages;
     struct task_struct *tsk;
 
 #ifdef CONFIG_HIGHMEM
@@ -140,7 +140,7 @@ get_dirty_limits(long *pbackground, long *pdirty,
 
     unmapped_ratio = 100 - ((global_page_state(NR_FILE_MAPPED) +
                 global_page_state(NR_ANON_PAGES)) * 100) /
-                total_pages;
+                vm_total_pages;
 
     dirty_ratio = vm_dirty_ratio;
     if (dirty_ratio > unmapped_ratio / 2)
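Worked example of the clamp that begins on this hunk's last line: vm_dirty_ratio is capped at half the unmapped fraction of memory. If file-mapped plus anonymous pages account for 80% of vm_total_pages, unmapped_ratio is 20 and a configured dirty ratio of 40 is cut to 10, so heavily-mapped workloads cannot devote most of memory to dirty pagecache.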
@@ -243,6 +243,16 @@ static void balance_dirty_pages(struct address_space *mapping)
         pdflush_operation(background_writeout, 0);
 }
 
+void set_page_dirty_balance(struct page *page)
+{
+    if (set_page_dirty(page)) {
+        struct address_space *mapping = page_mapping(page);
+
+        if (mapping)
+            balance_dirty_pages_ratelimited(mapping);
+    }
+}
+
 /**
  * balance_dirty_pages_ratelimited_nr - balance dirty memory state
  * @mapping: address_space which was dirtied
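A note on the new helper: set_page_dirty() returns nonzero only when the page actually goes from clean to dirty, and page_mapping() is re-checked because a racing truncate can detach the page, so the caller is throttled only when both hold. A minimal sketch of a hypothetical caller, assuming the declaration is exported via linux/writeback.h (the function name below is illustrative, not from this patch):

    #include <linux/mm.h>
    #include <linux/writeback.h>

    /*
     * Hypothetical fault-path helper: a handler that has just made
     * `page' writable dirties it and throttles itself the same way a
     * write(2) caller would be throttled.
     */
    static void example_fault_dirty(struct page *page)
    {
        set_page_dirty_balance(page);
    }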
@@ -491,9 +501,9 @@ void laptop_sync_completion(void)
  * will write six megabyte chunks, max.
  */
 
-static void set_ratelimit(void)
+void writeback_set_ratelimit(void)
 {
-    ratelimit_pages = total_pages / (num_online_cpus() * 32);
+    ratelimit_pages = vm_total_pages / (num_online_cpus() * 32);
     if (ratelimit_pages < 16)
         ratelimit_pages = 16;
     if (ratelimit_pages * PAGE_CACHE_SIZE > 4096 * 1024)
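Worked example of the sizing above, assuming 4 KiB pages: with 1 GiB of memory (262144 pages) and 4 online CPUs, 262144 / (4 * 32) = 2048 pages, i.e. 8 MiB of dirtyings between balance checks per task. That trips the 4 MiB ceiling tested on this hunk's last line, so ratelimit_pages is clamped back to 1024 pages; very small machines are raised to the 16-page floor instead.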
@@ -503,7 +513,7 @@ static void set_ratelimit(void)
 static int __cpuinit
 ratelimit_handler(struct notifier_block *self, unsigned long u, void *v)
 {
-    set_ratelimit();
+    writeback_set_ratelimit();
     return 0;
 }
 
@@ -522,9 +532,7 @@ void __init page_writeback_init(void)
     long buffer_pages = nr_free_buffer_pages();
     long correction;
 
-    total_pages = nr_free_pagecache_pages();
-
-    correction = (100 * 4 * buffer_pages) / total_pages;
+    correction = (100 * 4 * buffer_pages) / vm_total_pages;
 
     if (correction < 100) {
         dirty_background_ratio *= correction;
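The correction factor rescales the dirty thresholds when only part of memory is usable for pagecache: 100 * 4 * buffer_pages / vm_total_pages drops below 100 once buffer_pages is less than a quarter of all pages. For example, on a highmem machine where nr_free_buffer_pages() covers 1/32 of memory, correction is 400 / 32 = 12, scaling a dirty_background_ratio of 10 down to 10 * 12 / 100 = 1.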
@@ -538,7 +546,7 @@ void __init page_writeback_init(void)
         vm_dirty_ratio = 1;
     }
     mod_timer(&wb_timer, jiffies + dirty_writeback_interval);
-    set_ratelimit();
+    writeback_set_ratelimit();
     register_cpu_notifier(&ratelimit_nb);
 }
 
@@ -550,7 +558,7 @@ int do_writepages(struct address_space *mapping, struct writeback_control *wbc)
         return 0;
     wbc->for_writepages = 1;
     if (mapping->a_ops->writepages)
-        ret = mapping->a_ops->writepages(mapping, wbc);
+        ret = mapping->a_ops->writepages(mapping, wbc);
     else
         ret = generic_writepages(mapping, wbc);
     wbc->for_writepages = 0;
@@ -690,7 +698,7 @@ int set_page_dirty_lock(struct page *page)
 {
     int ret;
 
-    lock_page(page);
+    lock_page_nosync(page);
     ret = set_page_dirty(page);
     unlock_page(page);
     return ret;
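The switch to lock_page_nosync() matters because plain lock_page() may invoke the mapping's sync_page() address_space operation while waiting, which dereferences page->mapping. set_page_dirty_lock() exists for callers (e.g. users of get_user_pages()) that hold no reference pinning the address_space, so the mapping may be freed by a concurrent truncate while they sleep; the nosync variant waits on the page lock without ever touching the mapping.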
@@ -712,9 +720,15 @@ int test_clear_page_dirty(struct page *page)
             radix_tree_tag_clear(&mapping->page_tree,
                         page_index(page),
                         PAGECACHE_TAG_DIRTY);
-            if (mapping_cap_account_dirty(mapping))
-                __dec_zone_page_state(page, NR_FILE_DIRTY);
             write_unlock_irqrestore(&mapping->tree_lock, flags);
+            /*
+             * We can continue to use `mapping' here because the
+             * page is locked, which pins the address_space
+             */
+            if (mapping_cap_account_dirty(mapping)) {
+                page_mkclean(page);
+                dec_zone_page_state(page, NR_FILE_DIRTY);
+            }
             return 1;
         }
         write_unlock_irqrestore(&mapping->tree_lock, flags);
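Two things change here. First, page_mkclean() walks the reverse mapping and write-protects every pte that maps the page, so the next store through a shared mapping takes a write fault and re-enters the dirtying path; without this, a task could keep writing through an already-writable pte while the page is accounted clean. Second, the NR_FILE_DIRTY update moves outside tree_lock, so the irq-unsafe __dec_zone_page_state() becomes the interrupt-safe dec_zone_page_state(), and the new block comment records why `mapping' may still be dereferenced there.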
@@ -744,8 +758,10 @@ int clear_page_dirty_for_io(struct page *page)
 
     if (mapping) {
         if (TestClearPageDirty(page)) {
-            if (mapping_cap_account_dirty(mapping))
+            if (mapping_cap_account_dirty(mapping)) {
+                page_mkclean(page);
                 dec_zone_page_state(page, NR_FILE_DIRTY);
+            }
             return 1;
         }
         return 0;
@@ -803,6 +819,15 @@ int test_set_page_writeback(struct page *page)
 EXPORT_SYMBOL(test_set_page_writeback);
 
 /*
+ * Wakes up tasks that are being throttled due to writeback congestion
+ */
+void writeback_congestion_end(void)
+{
+    blk_congestion_end(WRITE);
+}
+EXPORT_SYMBOL(writeback_congestion_end);
+
+/*
  * Return true if any of the pages in the mapping are marked with the
  * passed tag.
  */
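This export gives filesystems whose write completions arrive outside the block layer a way to wake tasks sleeping in blk_congestion_wait(); a network filesystem is the natural caller, though this diff itself names none. A hypothetical completion hook, assuming writeback_congestion_end() is declared in linux/writeback.h:

    #include <linux/writeback.h>

    /*
     * Illustrative only: once the server has acknowledged an
     * over-the-wire write, wake any writers throttled on congestion.
     */
    static void example_remote_write_done(void)
    {
        writeback_congestion_end();
    }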