Diffstat (limited to 'mm/page-writeback.c')
-rw-r--r--	mm/page-writeback.c	25
1 file changed, 16 insertions(+), 9 deletions(-)
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index 789b6adbef37..24de8b65fdbd 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -126,8 +126,6 @@ static void background_writeout(unsigned long _min_pages);
 static struct prop_descriptor vm_completions;
 static struct prop_descriptor vm_dirties;
 
-static unsigned long determine_dirtyable_memory(void);
-
 /*
  * couple the period to the dirty_ratio:
  *
@@ -347,7 +345,13 @@ static unsigned long highmem_dirtyable_memory(unsigned long total)
 #endif
 }
 
-static unsigned long determine_dirtyable_memory(void)
+/**
+ * determine_dirtyable_memory - amount of memory that may be used
+ *
+ * Returns the number of pages that can currently be freed and used
+ * by the kernel for direct mappings.
+ */
+unsigned long determine_dirtyable_memory(void)
 {
 	unsigned long x;
 
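The hunk above drops the static qualifier so determine_dirtyable_memory() can be called from outside this file (it previously needed a local forward declaration, removed in the first hunk). A minimal caller sketch, illustrative only and not part of this patch; example_dirty_limit is a made-up name and a matching extern declaration in a shared header is assumed, while vm_dirty_ratio is the existing sysctl:

/* Hypothetical out-of-file caller (sketch, not from this patch). */
unsigned long example_dirty_limit(void)
{
	unsigned long dirtyable = determine_dirtyable_memory();

	/* vm_dirty_ratio percent of dirtyable memory, in pages */
	return (vm_dirty_ratio * dirtyable) / 100;
}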
@@ -956,6 +960,9 @@ retry:
 	}
 	if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
 		mapping->writeback_index = index;
+
+	if (wbc->range_cont)
+		wbc->range_start = index << PAGE_CACHE_SHIFT;
 	return ret;
 }
 EXPORT_SYMBOL(write_cache_pages);
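The new range_cont handling stores the index at which this pass stopped back into wbc->range_start, so a follow-up call can resume the same range instead of rescanning from the original start. A hedged sketch of how a ->writepages implementation might opt in; example_writepage and example_writepages are placeholder names, not from this patch:

static int example_writepage(struct page *page,
			     struct writeback_control *wbc, void *data)
{
	/* filesystem-specific writeout of this page would go here */
	unlock_page(page);
	return 0;
}

static int example_writepages(struct address_space *mapping,
			      struct writeback_control *wbc)
{
	/* ask write_cache_pages() to advance range_start as it goes */
	wbc->range_cont = 1;
	return write_cache_pages(mapping, wbc, example_writepage, NULL);
}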
@@ -1081,7 +1088,7 @@ int __set_page_dirty_nobuffers(struct page *page)
 		if (!mapping)
 			return 1;
 
-		write_lock_irq(&mapping->tree_lock);
+		spin_lock_irq(&mapping->tree_lock);
 		mapping2 = page_mapping(page);
 		if (mapping2) { /* Race with truncate? */
 			BUG_ON(mapping2 != mapping);
@@ -1095,7 +1102,7 @@ int __set_page_dirty_nobuffers(struct page *page)
 			radix_tree_tag_set(&mapping->page_tree,
 				page_index(page), PAGECACHE_TAG_DIRTY);
 		}
-		write_unlock_irq(&mapping->tree_lock);
+		spin_unlock_irq(&mapping->tree_lock);
 		if (mapping->host) {
 			/* !PageAnon && !swapper_space */
 			__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
@@ -1251,7 +1258,7 @@ int test_clear_page_writeback(struct page *page)
 		struct backing_dev_info *bdi = mapping->backing_dev_info;
 		unsigned long flags;
 
-		write_lock_irqsave(&mapping->tree_lock, flags);
+		spin_lock_irqsave(&mapping->tree_lock, flags);
 		ret = TestClearPageWriteback(page);
 		if (ret) {
 			radix_tree_tag_clear(&mapping->page_tree,
@@ -1262,7 +1269,7 @@ int test_clear_page_writeback(struct page *page)
 				__bdi_writeout_inc(bdi);
 			}
 		}
-		write_unlock_irqrestore(&mapping->tree_lock, flags);
+		spin_unlock_irqrestore(&mapping->tree_lock, flags);
 	} else {
 		ret = TestClearPageWriteback(page);
 	}
@@ -1280,7 +1287,7 @@ int test_set_page_writeback(struct page *page)
 		struct backing_dev_info *bdi = mapping->backing_dev_info;
 		unsigned long flags;
 
-		write_lock_irqsave(&mapping->tree_lock, flags);
+		spin_lock_irqsave(&mapping->tree_lock, flags);
 		ret = TestSetPageWriteback(page);
 		if (!ret) {
 			radix_tree_tag_set(&mapping->page_tree,
@@ -1293,7 +1300,7 @@ int test_set_page_writeback(struct page *page)
 			radix_tree_tag_clear(&mapping->page_tree,
 						page_index(page),
 						PAGECACHE_TAG_DIRTY);
-		write_unlock_irqrestore(&mapping->tree_lock, flags);
+		spin_unlock_irqrestore(&mapping->tree_lock, flags);
 	} else {
 		ret = TestSetPageWriteback(page);
 	}