Diffstat (limited to 'mm/page-writeback.c')
 mm/page-writeback.c | 50 ++++++++++++++++++++++++++++++++++++++++----------
 1 file changed, 40 insertions(+), 10 deletions(-)
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index a794945fd194..029dfad5a235 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -119,6 +119,44 @@ static void background_writeout(unsigned long _min_pages);
  * We make sure that the background writeout level is below the adjusted
  * clamping level.
  */
+
+static unsigned long highmem_dirtyable_memory(unsigned long total)
+{
+#ifdef CONFIG_HIGHMEM
+	int node;
+	unsigned long x = 0;
+
+	for_each_online_node(node) {
+		struct zone *z =
+			&NODE_DATA(node)->node_zones[ZONE_HIGHMEM];
+
+		x += zone_page_state(z, NR_FREE_PAGES)
+			+ zone_page_state(z, NR_INACTIVE)
+			+ zone_page_state(z, NR_ACTIVE);
+	}
+	/*
+	 * Make sure that the number of highmem pages is never larger
+	 * than the number of the total dirtyable memory. This can only
+	 * occur in very strange VM situations but we want to make sure
+	 * that this does not occur.
+	 */
+	return min(x, total);
+#else
+	return 0;
+#endif
+}
+
+static unsigned long determine_dirtyable_memory(void)
+{
+	unsigned long x;
+
+	x = global_page_state(NR_FREE_PAGES)
+		+ global_page_state(NR_INACTIVE)
+		+ global_page_state(NR_ACTIVE);
+	x -= highmem_dirtyable_memory(x);
+	return x + 1;	/* Ensure that we never return 0 */
+}
+
 static void
 get_dirty_limits(long *pbackground, long *pdirty,
 					struct address_space *mapping)
@@ -128,20 +166,12 @@ get_dirty_limits(long *pbackground, long *pdirty,
 	int unmapped_ratio;
 	long background;
 	long dirty;
-	unsigned long available_memory = vm_total_pages;
+	unsigned long available_memory = determine_dirtyable_memory();
 	struct task_struct *tsk;
 
-#ifdef CONFIG_HIGHMEM
-	/*
-	 * We always exclude high memory from our count.
-	 */
-	available_memory -= totalhigh_pages;
-#endif
-
-
 	unmapped_ratio = 100 - ((global_page_state(NR_FILE_MAPPED) +
 				global_page_state(NR_ANON_PAGES)) * 100) /
-					vm_total_pages;
+					available_memory;
 
 	dirty_ratio = vm_dirty_ratio;
 	if (dirty_ratio > unmapped_ratio / 2)
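
For readers skimming the change: determine_dirtyable_memory() replaces the static vm_total_pages baseline with live ZVC counters, so the dirty limits track memory that can actually hold dirty pagecache. Below is a minimal userspace sketch of that arithmetic; the counter values are made-up stand-ins for global_page_state()/zone_page_state(), and everything past the unmapped_ratio clamp shown in the second hunk is omitted, so treat this as an illustration rather than kernel code.

#include <stdio.h>

/* Made-up stand-in values for the kernel's ZVC counters, in pages. */
static unsigned long nr_free_pages  = 50000;
static unsigned long nr_inactive    = 120000;
static unsigned long nr_active      = 80000;
static unsigned long nr_highmem     = 30000;	/* dirtyable highmem pages */
static unsigned long nr_file_mapped = 40000;
static unsigned long nr_anon_pages  = 60000;

/*
 * Mirrors the patched determine_dirtyable_memory(): free + inactive +
 * active pages, minus the highmem share, with the highmem contribution
 * clamped so it can never exceed the total (the min(x, total) above).
 */
static unsigned long determine_dirtyable_memory(void)
{
	unsigned long x = nr_free_pages + nr_inactive + nr_active;

	x -= (nr_highmem < x) ? nr_highmem : x;
	return x + 1;	/* ensure we never return 0 */
}

int main(void)
{
	unsigned long available_memory = determine_dirtyable_memory();
	int dirty_ratio = 10;	/* stand-in for vm_dirty_ratio */
	int unmapped_ratio;

	/* Same ratio computation as the patched get_dirty_limits(). */
	unmapped_ratio = 100 - (int)(((nr_file_mapped + nr_anon_pages) * 100)
					/ available_memory);
	if (dirty_ratio > unmapped_ratio / 2)
		dirty_ratio = unmapped_ratio / 2;

	printf("available_memory     : %lu pages\n", available_memory);
	printf("unmapped_ratio       : %d%%\n", unmapped_ratio);
	printf("effective dirty_ratio: %d%% (%lu pages)\n", dirty_ratio,
	       dirty_ratio * available_memory / 100);
	return 0;
}

With these stand-in numbers the sketch reports 220001 dirtyable pages and leaves dirty_ratio at 10%; shrink the free/inactive/active counts and the computed limits shrink with them, which is the behaviour the switch away from the fixed vm_total_pages baseline buys.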