author	Peter Zijlstra <a.p.zijlstra@chello.nl>	2007-10-17 02:25:50 -0400
committer	Linus Torvalds <torvalds@woody.linux-foundation.org>	2007-10-17 11:42:45 -0400
commit	3e26c149c358529b1605f8959341d34bc4b880a3 (patch)
tree	9d173b1753b86bcf03a8591e2509e3162234447c
parent	04fbfdc14e5f48463820d6b9807daa5e9c92c51f (diff)
mm: dirty balancing for tasks
Based on ideas of Andrew: http://marc.info/?l=linux-kernel&m=102912915020543&w=2

Scale the bdi dirty limit inversely with each task's dirty rate. This makes heavy writers have a lower dirty limit than the occasional writer.

Andrea proposed something similar: http://lwn.net/Articles/152277/

The main disadvantage of his patch is that he uses an unrelated quantity to measure time, which leaves him with a workload-dependent tunable. Other than that, the two approaches appear quite similar.

[akpm@linux-foundation.org: fix warning]
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
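The heart of the patch is the new task_dirty_limit() in mm/page-writeback.c below: a task's effective dirty threshold is cut by up to one eighth of the global limit, in proportion to that task's share of recent dirtying. As a rough illustration, here is a minimal user-space C sketch of that scaling; scaled_task_dirty_limit() is a hypothetical name, and the numerator/denominator inputs are made-up values standing in for the fraction the kernel tracks through the vm_dirties proportion counters.

#include <stdio.h>

/* Sketch of the patch's scaling: dirty -= (dirty/8) * p_{t}, where p_{t}
 * is the task's fraction of recent dirtying (numerator/denominator).
 * Illustration only; not the kernel implementation. */
static long scaled_task_dirty_limit(long dirty, long numerator, long denominator)
{
	long orig = dirty;
	unsigned long long inv = (unsigned long long)(dirty >> 3); /* dirty/8 */

	inv *= numerator;	/* scale the headroom by the task's share... */
	inv /= denominator;	/* ...plain division stands in for do_div() */

	dirty -= (long)inv;
	if (dirty < orig / 2)	/* same safety clamp as the patch: never below half */
		dirty = orig / 2;
	return dirty;
}

int main(void)
{
	long dirty = 1000;	/* made-up global dirty limit, in pages */

	/* occasional writer, ~1% of recent dirtying: 1000 - 125*1/100 = 999 */
	printf("light writer: %ld\n", scaled_task_dirty_limit(dirty, 1, 100));
	/* heavy writer, all recent dirtying: 1000 - 125 = 875 */
	printf("heavy writer: %ld\n", scaled_task_dirty_limit(dirty, 100, 100));
	return 0;
}

So a task doing all the recent dirtying is throttled at 875 of 1000 pages while a light writer keeps 999, which is how heavy writers come to hit the limit first.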
-rw-r--r--	include/linux/init_task.h	1
-rw-r--r--	include/linux/sched.h	2
-rw-r--r--	kernel/fork.c	10
-rw-r--r--	mm/page-writeback.c	50
4 files changed, 62 insertions(+), 1 deletion(-)
diff --git a/include/linux/init_task.h b/include/linux/init_task.h
index 513bc3e489f0..3a619f57a2b2 100644
--- a/include/linux/init_task.h
+++ b/include/linux/init_task.h
@@ -171,6 +171,7 @@ extern struct group_info init_groups;
 	[PIDTYPE_PGID] = INIT_PID_LINK(PIDTYPE_PGID),		\
 	[PIDTYPE_SID]  = INIT_PID_LINK(PIDTYPE_SID),		\
 	},							\
+	.dirties = INIT_PROP_LOCAL_SINGLE(dirties),		\
 	INIT_TRACE_IRQFLAGS					\
 	INIT_LOCKDEP						\
 }
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 592e3a55f818..59738efff8ad 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -74,6 +74,7 @@ struct sched_param {
 #include <linux/pid.h>
 #include <linux/percpu.h>
 #include <linux/topology.h>
+#include <linux/proportions.h>
 #include <linux/seccomp.h>
 #include <linux/rcupdate.h>
 #include <linux/futex.h>
@@ -1149,6 +1150,7 @@ struct task_struct {
 #ifdef CONFIG_FAULT_INJECTION
 	int make_it_fail;
 #endif
+	struct prop_local_single dirties;
 };
 
 /*
diff --git a/kernel/fork.c b/kernel/fork.c
index 3fc3c1383912..163325af8179 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -107,6 +107,7 @@ static struct kmem_cache *mm_cachep;
 
 void free_task(struct task_struct *tsk)
 {
+	prop_local_destroy_single(&tsk->dirties);
 	free_thread_info(tsk->stack);
 	rt_mutex_debug_task_free(tsk);
 	free_task_struct(tsk);
@@ -163,6 +164,7 @@ static struct task_struct *dup_task_struct(struct task_struct *orig)
 {
 	struct task_struct *tsk;
 	struct thread_info *ti;
+	int err;
 
 	prepare_to_copy(orig);
 
@@ -178,6 +180,14 @@ static struct task_struct *dup_task_struct(struct task_struct *orig)
 
 	*tsk = *orig;
 	tsk->stack = ti;
+
+	err = prop_local_init_single(&tsk->dirties);
+	if (err) {
+		free_thread_info(ti);
+		free_task_struct(tsk);
+		return NULL;
+	}
+
 	setup_thread_stack(tsk, orig);
 
 #ifdef CONFIG_CC_STACKPROTECTOR
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index b0360546ac86..4073d531cd7b 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -118,6 +118,7 @@ static void background_writeout(unsigned long _min_pages);
  *
  */
 static struct prop_descriptor vm_completions;
+static struct prop_descriptor vm_dirties;
 
 static unsigned long determine_dirtyable_memory(void);
 
@@ -146,6 +147,7 @@ int dirty_ratio_handler(struct ctl_table *table, int write,
 	if (ret == 0 && write && vm_dirty_ratio != old_ratio) {
 		int shift = calc_period_shift();
 		prop_change_shift(&vm_completions, shift);
+		prop_change_shift(&vm_dirties, shift);
 	}
 	return ret;
 }
@@ -159,6 +161,11 @@ static inline void __bdi_writeout_inc(struct backing_dev_info *bdi)
 	__prop_inc_percpu(&vm_completions, &bdi->completions);
 }
 
+static inline void task_dirty_inc(struct task_struct *tsk)
+{
+	prop_inc_single(&vm_dirties, &tsk->dirties);
+}
+
 /*
  * Obtain an accurate fraction of the BDI's portion.
  */
@@ -198,6 +205,37 @@ clip_bdi_dirty_limit(struct backing_dev_info *bdi, long dirty, long *pbdi_dirty)
 	*pbdi_dirty = min(*pbdi_dirty, avail_dirty);
 }
 
+static inline void task_dirties_fraction(struct task_struct *tsk,
+		long *numerator, long *denominator)
+{
+	prop_fraction_single(&vm_dirties, &tsk->dirties,
+				numerator, denominator);
+}
+
+/*
+ * scale the dirty limit
+ *
+ * task specific dirty limit:
+ *
+ *   dirty -= (dirty/8) * p_{t}
+ */
+void task_dirty_limit(struct task_struct *tsk, long *pdirty)
+{
+	long numerator, denominator;
+	long dirty = *pdirty;
+	u64 inv = dirty >> 3;
+
+	task_dirties_fraction(tsk, &numerator, &denominator);
+	inv *= numerator;
+	do_div(inv, denominator);
+
+	dirty -= inv;
+	if (dirty < *pdirty/2)
+		dirty = *pdirty/2;
+
+	*pdirty = dirty;
+}
+
 /*
  * Work out the current dirty-memory clamping and background writeout
  * thresholds.
@@ -304,6 +342,7 @@ get_dirty_limits(long *pbackground, long *pdirty, long *pbdi_dirty,
 
 		*pbdi_dirty = bdi_dirty;
 		clip_bdi_dirty_limit(bdi, dirty, pbdi_dirty);
+		task_dirty_limit(current, pbdi_dirty);
 	}
 }
 
@@ -720,6 +759,7 @@ void __init page_writeback_init(void)
 
 	shift = calc_period_shift();
 	prop_descriptor_init(&vm_completions, shift);
+	prop_descriptor_init(&vm_dirties, shift);
 }
 
 /**
@@ -998,7 +1038,7 @@ EXPORT_SYMBOL(redirty_page_for_writepage);
  * If the mapping doesn't provide a set_page_dirty a_op, then
  * just fall through and assume that it wants buffer_heads.
  */
-int fastcall set_page_dirty(struct page *page)
+static int __set_page_dirty(struct page *page)
 {
 	struct address_space *mapping = page_mapping(page);
 
@@ -1016,6 +1056,14 @@ int fastcall set_page_dirty(struct page *page)
 	}
 	return 0;
 }
+
+int fastcall set_page_dirty(struct page *page)
+{
+	int ret = __set_page_dirty(page);
+	if (ret)
+		task_dirty_inc(current);
+	return ret;
+}
 EXPORT_SYMBOL(set_page_dirty);
 
 /*