Diffstat (limited to 'mm/page-writeback.c')
 mm/page-writeback.c | 50 ++++++++++++++++++++++++++++++++++++++++++++++++-
 1 file changed, 49 insertions(+), 1 deletion(-)
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index b0360546ac86..4073d531cd7b 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -118,6 +118,7 @@ static void background_writeout(unsigned long _min_pages);
  *
  */
 static struct prop_descriptor vm_completions;
+static struct prop_descriptor vm_dirties;
 
 static unsigned long determine_dirtyable_memory(void);
 
@@ -146,6 +147,7 @@ int dirty_ratio_handler(struct ctl_table *table, int write,
 	if (ret == 0 && write && vm_dirty_ratio != old_ratio) {
 		int shift = calc_period_shift();
 		prop_change_shift(&vm_completions, shift);
+		prop_change_shift(&vm_dirties, shift);
 	}
 	return ret;
 }
@@ -159,6 +161,11 @@ static inline void __bdi_writeout_inc(struct backing_dev_info *bdi)
 	__prop_inc_percpu(&vm_completions, &bdi->completions);
 }
 
+static inline void task_dirty_inc(struct task_struct *tsk)
+{
+	prop_inc_single(&vm_dirties, &tsk->dirties);
+}
+
 /*
  * Obtain an accurate fraction of the BDI's portion.
  */
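For intuition, what vm_dirties tracks is each task's share of recent page dirtyings. Below is a toy user-space analogue of that bookkeeping; the toy_* names are illustrative inventions, not the kernel API, and the real proportions code additionally decays counts each period so the fraction reflects only recent behaviour:

#include <stdio.h>

struct toy_prop { long total; };	/* plays the role of vm_dirties */
struct toy_local { long events; };	/* plays the role of tsk->dirties */

/* Analogue of prop_inc_single(): bump the task's count and the global total. */
static void toy_inc(struct toy_prop *p, struct toy_local *l)
{
	p->total++;
	l->events++;
}

/* Analogue of prop_fraction_single(): the task's share as num/den. */
static void toy_fraction(struct toy_prop *p, struct toy_local *l,
			 long *numerator, long *denominator)
{
	*numerator = l->events;
	*denominator = p->total ? p->total : 1;
}

int main(void)
{
	struct toy_prop dirties = { 0 };
	struct toy_local task_a = { 0 }, task_b = { 0 };
	long num, den;
	int i;

	for (i = 0; i < 30; i++)
		toy_inc(&dirties, &task_a);	/* task_a dirties 30 pages */
	for (i = 0; i < 10; i++)
		toy_inc(&dirties, &task_b);	/* task_b dirties 10 pages */

	toy_fraction(&dirties, &task_a, &num, &den);
	printf("task_a share: %ld/%ld\n", num, den);	/* 30/40 */
	return 0;
}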
@@ -198,6 +205,37 @@ clip_bdi_dirty_limit(struct backing_dev_info *bdi, long dirty, long *pbdi_dirty)
 	*pbdi_dirty = min(*pbdi_dirty, avail_dirty);
 }
 
+static inline void task_dirties_fraction(struct task_struct *tsk,
+		long *numerator, long *denominator)
+{
+	prop_fraction_single(&vm_dirties, &tsk->dirties,
+				numerator, denominator);
+}
+
+/*
+ * scale the dirty limit
+ *
+ * task specific dirty limit:
+ *
+ *   dirty -= (dirty/8) * p_{t}
+ */
+void task_dirty_limit(struct task_struct *tsk, long *pdirty)
+{
+	long numerator, denominator;
+	long dirty = *pdirty;
+	u64 inv = dirty >> 3;
+
+	task_dirties_fraction(tsk, &numerator, &denominator);
+	inv *= numerator;
+	do_div(inv, denominator);
+
+	dirty -= inv;
+	if (dirty < *pdirty/2)
+		dirty = *pdirty/2;
+
+	*pdirty = dirty;
+}
+
 /*
  * Work out the current dirty-memory clamping and background writeout
  * thresholds.
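To make the arithmetic in task_dirty_limit() above concrete, here is a minimal user-space sketch of the same computation; scale_dirty_limit() is a made-up name and plain 64-bit division stands in for do_div():

#include <stdio.h>

/* dirty -= (dirty/8) * p_t, where p_t = numerator/denominator is the
 * task's share of recent dirtyings; never drop below half the limit. */
static long scale_dirty_limit(long dirty, long numerator, long denominator)
{
	long orig = dirty;
	unsigned long long inv = (unsigned long long)(dirty >> 3);

	inv *= numerator;
	inv /= denominator;

	dirty -= (long)inv;
	if (dirty < orig / 2)
		dirty = orig / 2;
	return dirty;
}

int main(void)
{
	/* A task that did 3/4 of recent dirtying, limit 1000 pages:
	 * 1000 - (1000/8) * 3/4 = 1000 - 93 = 907. */
	printf("%ld\n", scale_dirty_limit(1000, 3, 4));

	/* A task that did all recent dirtying: 1000 - 125 = 875,
	 * so the heaviest dirtier hits its limit earliest. */
	printf("%ld\n", scale_dirty_limit(1000, 1, 1));
	return 0;
}

Since p_t is at most 1, the reduction is at most dirty/8, so the half-limit clamp is a defensive floor rather than the common case.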
@@ -304,6 +342,7 @@ get_dirty_limits(long *pbackground, long *pdirty, long *pbdi_dirty,
 
 		*pbdi_dirty = bdi_dirty;
 		clip_bdi_dirty_limit(bdi, dirty, pbdi_dirty);
+		task_dirty_limit(current, pbdi_dirty);
 	}
 }
 
@@ -720,6 +759,7 @@ void __init page_writeback_init(void)
 
 	shift = calc_period_shift();
 	prop_descriptor_init(&vm_completions, shift);
+	prop_descriptor_init(&vm_dirties, shift);
 }
 
 /**
@@ -998,7 +1038,7 @@ EXPORT_SYMBOL(redirty_page_for_writepage);
  * If the mapping doesn't provide a set_page_dirty a_op, then
  * just fall through and assume that it wants buffer_heads.
  */
-int fastcall set_page_dirty(struct page *page)
+static int __set_page_dirty(struct page *page)
 {
 	struct address_space *mapping = page_mapping(page);
 
@@ -1016,6 +1056,14 @@ int fastcall set_page_dirty(struct page *page)
 	}
 	return 0;
 }
+
+int fastcall set_page_dirty(struct page *page)
+{
+	int ret = __set_page_dirty(page);
+	if (ret)
+		task_dirty_inc(current);
+	return ret;
+}
 EXPORT_SYMBOL(set_page_dirty);
 
 /*
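The split above follows a transition-only accounting pattern: __set_page_dirty() reports whether the page actually went from clean to dirty, and the wrapper charges the calling task only in that case, so redirtying an already-dirty page is not double-counted. A self-contained sketch of the pattern with hypothetical toy_* names:

#include <stdio.h>
#include <stdbool.h>

struct toy_page { bool dirty; };
struct toy_task { long dirtied; };

/* State change only: nonzero iff the page was newly dirtied. */
static int toy_set_dirty(struct toy_page *page)
{
	if (page->dirty)
		return 0;
	page->dirty = true;
	return 1;
}

/* Wrapper: charge the task only on a real clean->dirty transition. */
static int toy_set_dirty_accounted(struct toy_page *page, struct toy_task *tsk)
{
	int ret = toy_set_dirty(page);

	if (ret)
		tsk->dirtied++;
	return ret;
}

int main(void)
{
	struct toy_page page = { false };
	struct toy_task task = { 0 };

	toy_set_dirty_accounted(&page, &task);	/* counted */
	toy_set_dirty_accounted(&page, &task);	/* already dirty: skipped */
	printf("dirtied = %ld\n", task.dirtied);	/* prints 1 */
	return 0;
}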