author     Len Brown <len.brown@intel.com>  2009-04-05 02:14:15 -0400
committer  Len Brown <len.brown@intel.com>  2009-04-05 02:14:15 -0400
commit     478c6a43fcbc6c11609f8cee7c7b57223907754f (patch)
tree       a7f7952099da60d33032aed6de9c0c56c9f8779e /mm/page-writeback.c
parent     8a3f257c704e02aee9869decd069a806b45be3f1 (diff)
parent     6bb597507f9839b13498781e481f5458aea33620 (diff)
Merge branch 'linus' into release
Conflicts:
	arch/x86/kernel/cpu/cpufreq/longhaul.c
Signed-off-by: Len Brown <len.brown@intel.com>
Diffstat (limited to 'mm/page-writeback.c')
-rw-r--r--  mm/page-writeback.c  46
1 file changed, 28 insertions(+), 18 deletions(-)
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index 74dc57c74349..30351f0063ac 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -66,7 +66,7 @@ static inline long sync_writeback_pages(void)
 /*
  * Start background writeback (via pdflush) at this percentage
  */
-int dirty_background_ratio = 5;
+int dirty_background_ratio = 10;
 
 /*
  * dirty_background_bytes starts at 0 (disabled) so that it is a function of
@@ -83,7 +83,7 @@ int vm_highmem_is_dirtyable;
 /*
  * The generator of dirty data starts writeback at this percentage
  */
-int vm_dirty_ratio = 10;
+int vm_dirty_ratio = 20;
 
 /*
  * vm_dirty_bytes starts at 0 (disabled) so that it is a function of
@@ -92,14 +92,14 @@ int vm_dirty_ratio = 10;
 unsigned long vm_dirty_bytes;
 
 /*
- * The interval between `kupdate'-style writebacks, in jiffies
+ * The interval between `kupdate'-style writebacks
  */
-int dirty_writeback_interval = 5 * HZ;
+unsigned int dirty_writeback_interval = 5 * 100; /* centiseconds */
 
 /*
- * The longest number of jiffies for which data is allowed to remain dirty
+ * The longest time for which data is allowed to remain dirty
  */
-int dirty_expire_interval = 30 * HZ;
+unsigned int dirty_expire_interval = 30 * 100; /* centiseconds */
 
 /*
  * Flag that makes the machine dump writes/reads and block dirtyings.
@@ -770,9 +770,9 @@ static void wb_kupdate(unsigned long arg)
 
 	sync_supers();
 
-	oldest_jif = jiffies - dirty_expire_interval;
+	oldest_jif = jiffies - msecs_to_jiffies(dirty_expire_interval);
 	start_jif = jiffies;
-	next_jif = start_jif + dirty_writeback_interval;
+	next_jif = start_jif + msecs_to_jiffies(dirty_writeback_interval * 10);
 	nr_to_write = global_page_state(NR_FILE_DIRTY) +
 			global_page_state(NR_UNSTABLE_NFS) +
 			(inodes_stat.nr_inodes - inodes_stat.nr_unused);
@@ -801,9 +801,10 @@ static void wb_kupdate(unsigned long arg)
 int dirty_writeback_centisecs_handler(ctl_table *table, int write,
 	struct file *file, void __user *buffer, size_t *length, loff_t *ppos)
 {
-	proc_dointvec_userhz_jiffies(table, write, file, buffer, length, ppos);
+	proc_dointvec(table, write, file, buffer, length, ppos);
 	if (dirty_writeback_interval)
-		mod_timer(&wb_timer, jiffies + dirty_writeback_interval);
+		mod_timer(&wb_timer, jiffies +
+			msecs_to_jiffies(dirty_writeback_interval * 10));
 	else
 		del_timer(&wb_timer);
 	return 0;
@@ -905,7 +906,8 @@ void __init page_writeback_init(void)
 {
 	int shift;
 
-	mod_timer(&wb_timer, jiffies + dirty_writeback_interval);
+	mod_timer(&wb_timer,
+		  jiffies + msecs_to_jiffies(dirty_writeback_interval * 10));
 	writeback_set_ratelimit();
 	register_cpu_notifier(&ratelimit_nb);
 
@@ -1198,6 +1200,20 @@ int __set_page_dirty_no_writeback(struct page *page)
 }
 
 /*
+ * Helper function for set_page_dirty family.
+ * NOTE: This relies on being atomic wrt interrupts.
+ */
+void account_page_dirtied(struct page *page, struct address_space *mapping)
+{
+	if (mapping_cap_account_dirty(mapping)) {
+		__inc_zone_page_state(page, NR_FILE_DIRTY);
+		__inc_bdi_stat(mapping->backing_dev_info, BDI_RECLAIMABLE);
+		task_dirty_inc(current);
+		task_io_account_write(PAGE_CACHE_SIZE);
+	}
+}
+
+/*
  * For address_spaces which do not use buffers. Just tag the page as dirty in
  * its radix tree.
  *
@@ -1226,13 +1242,7 @@ int __set_page_dirty_nobuffers(struct page *page)
 		if (mapping2) { /* Race with truncate? */
 			BUG_ON(mapping2 != mapping);
 			WARN_ON_ONCE(!PagePrivate(page) && !PageUptodate(page));
-			if (mapping_cap_account_dirty(mapping)) {
-				__inc_zone_page_state(page, NR_FILE_DIRTY);
-				__inc_bdi_stat(mapping->backing_dev_info,
-						BDI_RECLAIMABLE);
-				task_dirty_inc(current);
-				task_io_account_write(PAGE_CACHE_SIZE);
-			}
+			account_page_dirtied(page, mapping);
 			radix_tree_tag_set(&mapping->page_tree,
 				page_index(page), PAGECACHE_TAG_DIRTY);
 		}
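
The recurring change this merge brings into mm/page-writeback.c is that dirty_writeback_interval (and dirty_expire_interval) are now stored in centiseconds rather than jiffies, with msecs_to_jiffies(interval * 10) doing the conversion at the timer-arming sites. A minimal standalone sketch of that arithmetic, not kernel code: the HZ value and the simplified msecs_to_jiffies() below are stand-ins chosen for illustration, not the kernel's definitions.

#include <stdio.h>

#define HZ 250	/* example tick rate; in the kernel this is a build-time option */

/* simplified stand-in for the kernel's msecs_to_jiffies(), rounding up */
static unsigned long msecs_to_jiffies(unsigned int ms)
{
	return ((unsigned long)ms * HZ + 999) / 1000;
}

/* sysctl value as stored after this merge: centiseconds, not jiffies */
static unsigned int dirty_writeback_interval = 5 * 100;

int main(void)
{
	/* multiply by 10 to go centiseconds -> milliseconds, then to jiffies */
	unsigned long period = msecs_to_jiffies(dirty_writeback_interval * 10);

	/* 500 cs -> 5000 ms -> 1250 jiffies at HZ=250 */
	printf("kupdate period: %lu jiffies (HZ=%d)\n", period, HZ);
	return 0;
}

Because the stored value is no longer in kernel ticks, the sysctl handler in the hunk above can use plain proc_dointvec() instead of proc_dointvec_userhz_jiffies(), and the interval a user configures means the same thing regardless of which HZ the kernel was built with.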