author	Peter Zijlstra <peterz@infradead.org>	2007-10-08 12:54:37 -0400
committer	Linus Torvalds <torvalds@woody.linux-foundation.org>	2007-10-08 15:58:14 -0400
commit	a200ee182a016752464a12cb2e8762e48254bb09 (patch)
tree	7b273f002625a4c368f7b20b144990f7f4f81df9	/include/linux/writeback.h
parent	3eb215de26e6e94bf5fed9cb77230c383b30e53b (diff)
mm: set_page_dirty_balance() vs ->page_mkwrite()
All the current page_mkwrite() implementations also set the page dirty. As a result, the set_page_dirty_balance() call does _not_ call balance, because the page is already found dirty. This allows us to dirty a _lot_ of pages without ever hitting balance_dirty_pages(). Not good (tm).

Force a balance call if ->page_mkwrite() was successful.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
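For context, the caller-side shape of the fix (in the write-fault path of mm/memory.c, which this diff does not show) is roughly the following. This is an illustrative sketch only; the helper name wp_page_shared_sketch() and the surrounding control flow are invented for clarity and are not the actual kernel code:

/*
 * Illustrative sketch, not the real mm/memory.c change: the write-fault
 * path remembers whether ->page_mkwrite() ran successfully and passes
 * that down, so the throttling decision no longer depends on the page
 * still being clean when set_page_dirty() is finally called.
 */
static int wp_page_shared_sketch(struct vm_area_struct *vma, struct page *page)
{
	int page_mkwrite = 0;

	if (vma->vm_ops && vma->vm_ops->page_mkwrite) {
		/* 2.6.23-era hook signature: int page_mkwrite(vma, page) */
		if (vma->vm_ops->page_mkwrite(vma, page) < 0)
			return VM_FAULT_SIGBUS;
		page_mkwrite = 1;	/* the fs has most likely dirtied the page already */
	}

	/* ... make the pte writable, update mmu state ... */

	set_page_dirty_balance(page, page_mkwrite);
	return VM_FAULT_MINOR;
}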
Diffstat (limited to 'include/linux/writeback.h')
-rw-r--r--	include/linux/writeback.h	2
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/include/linux/writeback.h b/include/linux/writeback.h
index 4ef4d22e5e43..b4af6bcb7b7a 100644
--- a/include/linux/writeback.h
+++ b/include/linux/writeback.h
@@ -127,7 +127,7 @@ int sync_page_range(struct inode *inode, struct address_space *mapping,
 			loff_t pos, loff_t count);
 int sync_page_range_nolock(struct inode *inode, struct address_space *mapping,
 			loff_t pos, loff_t count);
-void set_page_dirty_balance(struct page *page);
+void set_page_dirty_balance(struct page *page, int page_mkwrite);
 void writeback_set_ratelimit(void);
 
 /* pdflush.c */
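The hunk above only changes the prototype; on the mm/page-writeback.c side the new argument is presumably consumed along these lines (a sketch reconstructed from the commit message, not quoted from the patch):

void set_page_dirty_balance(struct page *page, int page_mkwrite)
{
	/*
	 * Balance if we newly dirtied the page here, or if ->page_mkwrite()
	 * already dirtied it for us and set_page_dirty() therefore reports
	 * no transition.
	 */
	if (set_page_dirty(page) || page_mkwrite) {
		struct address_space *mapping = page_mapping(page);

		if (mapping)
			balance_dirty_pages_ratelimited(mapping);
	}
}

Keying the balance call on the page_mkwrite flag rather than only on set_page_dirty()'s return value is what closes the throttling gap described in the commit message.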