diff options
author | Peter Zijlstra <a.p.zijlstra@chello.nl> | 2007-10-17 02:25:50 -0400 |
---|---|---|
committer | Linus Torvalds <torvalds@woody.linux-foundation.org> | 2007-10-17 11:42:45 -0400 |
commit | 3e26c149c358529b1605f8959341d34bc4b880a3 (patch) | |
tree | 9d173b1753b86bcf03a8591e2509e3162234447c /mm | |
parent | 04fbfdc14e5f48463820d6b9807daa5e9c92c51f (diff) |
mm: dirty balancing for tasks
Based on ideas of Andrew:
http://marc.info/?l=linux-kernel&m=102912915020543&w=2
Scale the bdi dirty limit inversely with the task's dirty rate.
This makes heavy writers have a lower dirty limit than the occasional writer.
Andrea proposed something similar:
http://lwn.net/Articles/152277/
The main disadvantage to his patch is that he uses an unrelated quantity to
measure time, which leaves him with a workload-dependent tunable. Other than
that the two approaches appear quite similar.
[akpm@linux-foundation.org: fix warning]
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm')
-rw-r--r-- | mm/page-writeback.c | 50 |
1 files changed, 49 insertions, 1 deletions
diff --git a/mm/page-writeback.c b/mm/page-writeback.c index b0360546ac86..4073d531cd7b 100644 --- a/mm/page-writeback.c +++ b/mm/page-writeback.c | |||
@@ -118,6 +118,7 @@ static void background_writeout(unsigned long _min_pages); | |||
118 | * | 118 | * |
119 | */ | 119 | */ |
120 | static struct prop_descriptor vm_completions; | 120 | static struct prop_descriptor vm_completions; |
121 | static struct prop_descriptor vm_dirties; | ||
121 | 122 | ||
122 | static unsigned long determine_dirtyable_memory(void); | 123 | static unsigned long determine_dirtyable_memory(void); |
123 | 124 | ||
@@ -146,6 +147,7 @@ int dirty_ratio_handler(struct ctl_table *table, int write, | |||
146 | if (ret == 0 && write && vm_dirty_ratio != old_ratio) { | 147 | if (ret == 0 && write && vm_dirty_ratio != old_ratio) { |
147 | int shift = calc_period_shift(); | 148 | int shift = calc_period_shift(); |
148 | prop_change_shift(&vm_completions, shift); | 149 | prop_change_shift(&vm_completions, shift); |
150 | prop_change_shift(&vm_dirties, shift); | ||
149 | } | 151 | } |
150 | return ret; | 152 | return ret; |
151 | } | 153 | } |
@@ -159,6 +161,11 @@ static inline void __bdi_writeout_inc(struct backing_dev_info *bdi) | |||
159 | __prop_inc_percpu(&vm_completions, &bdi->completions); | 161 | __prop_inc_percpu(&vm_completions, &bdi->completions); |
160 | } | 162 | } |
161 | 163 | ||
164 | static inline void task_dirty_inc(struct task_struct *tsk) | ||
165 | { | ||
166 | prop_inc_single(&vm_dirties, &tsk->dirties); | ||
167 | } | ||
168 | |||
162 | /* | 169 | /* |
163 | * Obtain an accurate fraction of the BDI's portion. | 170 | * Obtain an accurate fraction of the BDI's portion. |
164 | */ | 171 | */ |
@@ -198,6 +205,37 @@ clip_bdi_dirty_limit(struct backing_dev_info *bdi, long dirty, long *pbdi_dirty) | |||
198 | *pbdi_dirty = min(*pbdi_dirty, avail_dirty); | 205 | *pbdi_dirty = min(*pbdi_dirty, avail_dirty); |
199 | } | 206 | } |
200 | 207 | ||
208 | static inline void task_dirties_fraction(struct task_struct *tsk, | ||
209 | long *numerator, long *denominator) | ||
210 | { | ||
211 | prop_fraction_single(&vm_dirties, &tsk->dirties, | ||
212 | numerator, denominator); | ||
213 | } | ||
214 | |||
215 | /* | ||
216 | * scale the dirty limit | ||
217 | * | ||
218 | * task specific dirty limit: | ||
219 | * | ||
220 | * dirty -= (dirty/8) * p_{t} | ||
221 | */ | ||
222 | void task_dirty_limit(struct task_struct *tsk, long *pdirty) | ||
223 | { | ||
224 | long numerator, denominator; | ||
225 | long dirty = *pdirty; | ||
226 | u64 inv = dirty >> 3; | ||
227 | |||
228 | task_dirties_fraction(tsk, &numerator, &denominator); | ||
229 | inv *= numerator; | ||
230 | do_div(inv, denominator); | ||
231 | |||
232 | dirty -= inv; | ||
233 | if (dirty < *pdirty/2) | ||
234 | dirty = *pdirty/2; | ||
235 | |||
236 | *pdirty = dirty; | ||
237 | } | ||
238 | |||
201 | /* | 239 | /* |
202 | * Work out the current dirty-memory clamping and background writeout | 240 | * Work out the current dirty-memory clamping and background writeout |
203 | * thresholds. | 241 | * thresholds. |
@@ -304,6 +342,7 @@ get_dirty_limits(long *pbackground, long *pdirty, long *pbdi_dirty, | |||
304 | 342 | ||
305 | *pbdi_dirty = bdi_dirty; | 343 | *pbdi_dirty = bdi_dirty; |
306 | clip_bdi_dirty_limit(bdi, dirty, pbdi_dirty); | 344 | clip_bdi_dirty_limit(bdi, dirty, pbdi_dirty); |
345 | task_dirty_limit(current, pbdi_dirty); | ||
307 | } | 346 | } |
308 | } | 347 | } |
309 | 348 | ||
@@ -720,6 +759,7 @@ void __init page_writeback_init(void) | |||
720 | 759 | ||
721 | shift = calc_period_shift(); | 760 | shift = calc_period_shift(); |
722 | prop_descriptor_init(&vm_completions, shift); | 761 | prop_descriptor_init(&vm_completions, shift); |
762 | prop_descriptor_init(&vm_dirties, shift); | ||
723 | } | 763 | } |
724 | 764 | ||
725 | /** | 765 | /** |
@@ -998,7 +1038,7 @@ EXPORT_SYMBOL(redirty_page_for_writepage); | |||
998 | * If the mapping doesn't provide a set_page_dirty a_op, then | 1038 | * If the mapping doesn't provide a set_page_dirty a_op, then |
999 | * just fall through and assume that it wants buffer_heads. | 1039 | * just fall through and assume that it wants buffer_heads. |
1000 | */ | 1040 | */ |
1001 | int fastcall set_page_dirty(struct page *page) | 1041 | static int __set_page_dirty(struct page *page) |
1002 | { | 1042 | { |
1003 | struct address_space *mapping = page_mapping(page); | 1043 | struct address_space *mapping = page_mapping(page); |
1004 | 1044 | ||
@@ -1016,6 +1056,14 @@ int fastcall set_page_dirty(struct page *page) | |||
1016 | } | 1056 | } |
1017 | return 0; | 1057 | return 0; |
1018 | } | 1058 | } |
1059 | |||
1060 | int fastcall set_page_dirty(struct page *page) | ||
1061 | { | ||
1062 | int ret = __set_page_dirty(page); | ||
1063 | if (ret) | ||
1064 | task_dirty_inc(current); | ||
1065 | return ret; | ||
1066 | } | ||
1019 | EXPORT_SYMBOL(set_page_dirty); | 1067 | EXPORT_SYMBOL(set_page_dirty); |
1020 | 1068 | ||
1021 | /* | 1069 | /* |