author     Christoph Lameter <clameter@sgi.com>                   2007-05-06 17:48:59 -0400
committer  Linus Torvalds <torvalds@woody.linux-foundation.org>   2007-05-07 15:12:51 -0400
commit     1b4244647ceaad42ea6eb12899d58753d82b7727 (patch)
tree       fabdd30f1484d7ccda1374fffb9231c39450efb0 /mm/page-writeback.c
parent     476f35348eb8d2a827765992899fea78b7dcc46f (diff)
Use ZVC counters to establish exact size of dirtyable pages
We can use the global ZVC counters to establish the exact size of the LRU
and the free pages. This allows a more accurate determination of the dirty
ratio.
This patch fixes the broken ratio calculations when large amounts of
memory are allocated to huge pages or to other consumers that do not put
the pages onto the LRU.
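A rough worked example (the numbers are purely illustrative and not from
the patch): on a 16 GB machine with 8 GB reserved as huge pages,
vm_total_pages still counts roughly 16 GB, so vm_dirty_ratio = 10 allows
about 1.6 GB of dirty pagecache even though only about 8 GB is actually
dirtyable; with the ZVC-based count (free + active + inactive, roughly
8 GB here) the same ratio yields a limit of about 0.8 GB.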
Notes:
- I did not add NR_SLAB_RECLAIMABLE to the calculation of the
  dirtyable pages. Those pages may be reclaimable, but at this point
  they are not dirtyable. If NR_SLAB_RECLAIMABLE were included, a
  huge number of reclaimable pages could stop writeback from
  occurring (see the sketch after these notes).
- This patch used to be in mm as the last one in a series of patches.
It was removed when Linus updated the treatment of highmem because
there was a conflict. I updated the patch to follow Linus' approach.
This patch is needed to fulfill the claims made at the beginning of the
patchset that is now in Linus' tree.
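For illustration only, the variant rejected in the first note would have
looked roughly like the following (this is a sketch, not part of the
patch):

	/*
	 * NOT what this patch does: adding reclaimable slab to the
	 * dirtyable count would let a large slab cache raise the dirty
	 * limits and hold off writeback.
	 */
	x = global_page_state(NR_FREE_PAGES)
		+ global_page_state(NR_INACTIVE)
		+ global_page_state(NR_ACTIVE)
		+ global_page_state(NR_SLAB_RECLAIMABLE);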
Signed-off-by: Christoph Lameter <clameter@sgi.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/page-writeback.c')
-rw-r--r--  mm/page-writeback.c  50
1 files changed, 40 insertions, 10 deletions
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index a794945fd194..029dfad5a235 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -119,6 +119,44 @@ static void background_writeout(unsigned long _min_pages);
  * We make sure that the background writeout level is below the adjusted
  * clamping level.
  */
+
+static unsigned long highmem_dirtyable_memory(unsigned long total)
+{
+#ifdef CONFIG_HIGHMEM
+	int node;
+	unsigned long x = 0;
+
+	for_each_online_node(node) {
+		struct zone *z =
+			&NODE_DATA(node)->node_zones[ZONE_HIGHMEM];
+
+		x += zone_page_state(z, NR_FREE_PAGES)
+			+ zone_page_state(z, NR_INACTIVE)
+			+ zone_page_state(z, NR_ACTIVE);
+	}
+	/*
+	 * Make sure that the number of highmem pages is never larger
+	 * than the number of the total dirtyable memory. This can only
+	 * occur in very strange VM situations but we want to make sure
+	 * that this does not occur.
+	 */
+	return min(x, total);
+#else
+	return 0;
+#endif
+}
+
+static unsigned long determine_dirtyable_memory(void)
+{
+	unsigned long x;
+
+	x = global_page_state(NR_FREE_PAGES)
+		+ global_page_state(NR_INACTIVE)
+		+ global_page_state(NR_ACTIVE);
+	x -= highmem_dirtyable_memory(x);
+	return x + 1;	/* Ensure that we never return 0 */
+}
+
 static void
 get_dirty_limits(long *pbackground, long *pdirty,
 		 struct address_space *mapping)
@@ -128,20 +166,12 @@ get_dirty_limits(long *pbackground, long *pdirty,
 	int unmapped_ratio;
 	long background;
 	long dirty;
-	unsigned long available_memory = vm_total_pages;
+	unsigned long available_memory = determine_dirtyable_memory();
 	struct task_struct *tsk;
 
-#ifdef CONFIG_HIGHMEM
-	/*
-	 * We always exclude high memory from our count.
-	 */
-	available_memory -= totalhigh_pages;
-#endif
-
-
 	unmapped_ratio = 100 - ((global_page_state(NR_FILE_MAPPED) +
 				global_page_state(NR_ANON_PAGES)) * 100) /
-					vm_total_pages;
+					available_memory;
 
 	dirty_ratio = vm_dirty_ratio;
 	if (dirty_ratio > unmapped_ratio / 2)
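For context only, a minimal sketch (not part of this diff) of how the
rest of get_dirty_limits() turns the dirtyable-page count into the
actual thresholds; names follow the kernel code above, and the exact
clamping of the ratios is omitted, so a smaller available_memory
directly lowers both limits:

	/* Sketch of the unchanged threshold arithmetic further down
	 * in get_dirty_limits(). */
	background = (background_ratio * available_memory) / 100;
	dirty = (dirty_ratio * available_memory) / 100;
	*pbackground = background;
	*pdirty = dirty;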