path: root/mm/page-writeback.c
author		Christoph Lameter <clameter@sgi.com>	2006-06-30 04:55:42 -0400
committer	Linus Torvalds <torvalds@g5.osdl.org>	2006-06-30 14:25:36 -0400
commit		c24f21bda88df4574de0a32a2a1558a23adae1b8
tree		1cf2f0b69cc691c2e1a9ed569a7a98179cc9683e	/mm/page-writeback.c
parent		d2c5e30c9a1420902262aa923794d2ae4e0bc391
[PATCH] zoned vm counters: remove useless struct wbs
Remove writeback state

We can remove some functions now that were needed to calculate the page state
for writeback control since these statistics are now directly available.

Signed-off-by: Christoph Lameter <clameter@sgi.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
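In practical terms, the open-coded pattern this patch removes from each call site was:

	struct writeback_state wbs;

	get_writeback_state(&wbs);
	nr_reclaimable = wbs.nr_dirty + wbs.nr_unstable;

With the zoned VM counters in place, the same value is read directly from the global counters (a sketch of the replacement pattern; the exact per-function variants appear in the hunks below):

	nr_reclaimable = global_page_state(NR_FILE_DIRTY) +
			global_page_state(NR_UNSTABLE_NFS);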
Diffstat (limited to 'mm/page-writeback.c')
-rw-r--r--	mm/page-writeback.c	85
1 file changed, 34 insertions(+), 51 deletions(-)
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index de9836f43db5..e630188ccc40 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -99,23 +99,6 @@ EXPORT_SYMBOL(laptop_mode);
 
 static void background_writeout(unsigned long _min_pages);
 
-struct writeback_state
-{
-	unsigned long nr_dirty;
-	unsigned long nr_unstable;
-	unsigned long nr_mapped;
-	unsigned long nr_writeback;
-};
-
-static void get_writeback_state(struct writeback_state *wbs)
-{
-	wbs->nr_dirty = global_page_state(NR_FILE_DIRTY);
-	wbs->nr_unstable = global_page_state(NR_UNSTABLE_NFS);
-	wbs->nr_mapped = global_page_state(NR_FILE_MAPPED) +
-			global_page_state(NR_ANON_PAGES);
-	wbs->nr_writeback = global_page_state(NR_WRITEBACK);
-}
-
 /*
  * Work out the current dirty-memory clamping and background writeout
  * thresholds.
@@ -134,8 +117,8 @@ static void get_writeback_state(struct writeback_state *wbs)
  * clamping level.
  */
 static void
-get_dirty_limits(struct writeback_state *wbs, long *pbackground, long *pdirty,
+get_dirty_limits(long *pbackground, long *pdirty,
 		struct address_space *mapping)
 {
 	int background_ratio;		/* Percentages */
 	int dirty_ratio;
@@ -145,8 +128,6 @@ get_dirty_limits(struct writeback_state *wbs, long *pbackground, long *pdirty,
 	unsigned long available_memory = total_pages;
 	struct task_struct *tsk;
 
-	get_writeback_state(wbs);
-
 #ifdef CONFIG_HIGHMEM
 	/*
 	 * If this mapping can only allocate from low memory,
@@ -157,7 +138,9 @@ get_dirty_limits(struct writeback_state *wbs, long *pbackground, long *pdirty,
 #endif
 
 
-	unmapped_ratio = 100 - (wbs->nr_mapped * 100) / total_pages;
+	unmapped_ratio = 100 - ((global_page_state(NR_FILE_MAPPED) +
+				global_page_state(NR_ANON_PAGES)) * 100) /
+					total_pages;
 
 	dirty_ratio = vm_dirty_ratio;
 	if (dirty_ratio > unmapped_ratio / 2)
@@ -190,7 +173,6 @@ get_dirty_limits(struct writeback_state *wbs, long *pbackground, long *pdirty,
  */
 static void balance_dirty_pages(struct address_space *mapping)
 {
-	struct writeback_state wbs;
 	long nr_reclaimable;
 	long background_thresh;
 	long dirty_thresh;
@@ -208,11 +190,12 @@ static void balance_dirty_pages(struct address_space *mapping)
 			.range_cyclic	= 1,
 		};
 
-		get_dirty_limits(&wbs, &background_thresh,
-					&dirty_thresh, mapping);
-		nr_reclaimable = wbs.nr_dirty + wbs.nr_unstable;
-		if (nr_reclaimable + wbs.nr_writeback <= dirty_thresh)
-			break;
+		get_dirty_limits(&background_thresh, &dirty_thresh, mapping);
+		nr_reclaimable = global_page_state(NR_FILE_DIRTY) +
+					global_page_state(NR_UNSTABLE_NFS);
+		if (nr_reclaimable + global_page_state(NR_WRITEBACK) <=
+			dirty_thresh)
+				break;
 
 		if (!dirty_exceeded)
 			dirty_exceeded = 1;
@@ -225,11 +208,14 @@ static void balance_dirty_pages(struct address_space *mapping)
 		 */
 		if (nr_reclaimable) {
 			writeback_inodes(&wbc);
-			get_dirty_limits(&wbs, &background_thresh,
+			get_dirty_limits(&background_thresh,
 						&dirty_thresh, mapping);
-			nr_reclaimable = wbs.nr_dirty + wbs.nr_unstable;
-			if (nr_reclaimable + wbs.nr_writeback <= dirty_thresh)
-				break;
+			nr_reclaimable = global_page_state(NR_FILE_DIRTY) +
+					global_page_state(NR_UNSTABLE_NFS);
+			if (nr_reclaimable +
+				global_page_state(NR_WRITEBACK)
+					<= dirty_thresh)
+				break;
 			pages_written += write_chunk - wbc.nr_to_write;
 			if (pages_written >= write_chunk)
 				break;		/* We've done our duty */
@@ -237,8 +223,9 @@ static void balance_dirty_pages(struct address_space *mapping)
 		blk_congestion_wait(WRITE, HZ/10);
 	}
 
-	if (nr_reclaimable + wbs.nr_writeback <= dirty_thresh && dirty_exceeded)
-		dirty_exceeded = 0;
+	if (nr_reclaimable + global_page_state(NR_WRITEBACK)
+		<= dirty_thresh && dirty_exceeded)
+			dirty_exceeded = 0;
 
 	if (writeback_in_progress(bdi))
 		return;		/* pdflush is already working this queue */
@@ -300,12 +287,11 @@ EXPORT_SYMBOL(balance_dirty_pages_ratelimited_nr);
 
 void throttle_vm_writeout(void)
 {
-	struct writeback_state wbs;
 	long background_thresh;
 	long dirty_thresh;
 
 	for ( ; ; ) {
-		get_dirty_limits(&wbs, &background_thresh, &dirty_thresh, NULL);
+		get_dirty_limits(&background_thresh, &dirty_thresh, NULL);
 
 		/*
 		 * Boost the allowable dirty threshold a bit for page
@@ -313,8 +299,9 @@ void throttle_vm_writeout(void)
 		 */
 		dirty_thresh += dirty_thresh / 10;      /* wheeee... */
 
-		if (wbs.nr_unstable + wbs.nr_writeback <= dirty_thresh)
-			break;
+		if (global_page_state(NR_UNSTABLE_NFS) +
+			global_page_state(NR_WRITEBACK) <= dirty_thresh)
+				break;
 		blk_congestion_wait(WRITE, HZ/10);
 	}
 }
@@ -337,12 +324,12 @@ static void background_writeout(unsigned long _min_pages)
 	};
 
 	for ( ; ; ) {
-		struct writeback_state wbs;
 		long background_thresh;
 		long dirty_thresh;
 
-		get_dirty_limits(&wbs, &background_thresh, &dirty_thresh, NULL);
-		if (wbs.nr_dirty + wbs.nr_unstable < background_thresh
+		get_dirty_limits(&background_thresh, &dirty_thresh, NULL);
+		if (global_page_state(NR_FILE_DIRTY) +
+			global_page_state(NR_UNSTABLE_NFS) < background_thresh
 				&& min_pages <= 0)
 			break;
 		wbc.encountered_congestion = 0;
@@ -366,12 +353,9 @@ static void background_writeout(unsigned long _min_pages)
  */
 int wakeup_pdflush(long nr_pages)
 {
-	if (nr_pages == 0) {
-		struct writeback_state wbs;
-
-		get_writeback_state(&wbs);
-		nr_pages = wbs.nr_dirty + wbs.nr_unstable;
-	}
+	if (nr_pages == 0)
+		nr_pages = global_page_state(NR_FILE_DIRTY) +
+				global_page_state(NR_UNSTABLE_NFS);
 	return pdflush_operation(background_writeout, nr_pages);
 }
 
@@ -402,7 +386,6 @@ static void wb_kupdate(unsigned long arg)
 	unsigned long start_jif;
 	unsigned long next_jif;
 	long nr_to_write;
-	struct writeback_state wbs;
 	struct writeback_control wbc = {
 		.bdi		= NULL,
 		.sync_mode	= WB_SYNC_NONE,
@@ -415,11 +398,11 @@ static void wb_kupdate(unsigned long arg)
 
 	sync_supers();
 
-	get_writeback_state(&wbs);
 	oldest_jif = jiffies - dirty_expire_interval;
 	start_jif = jiffies;
 	next_jif = start_jif + dirty_writeback_interval;
-	nr_to_write = wbs.nr_dirty + wbs.nr_unstable +
+	nr_to_write = global_page_state(NR_FILE_DIRTY) +
+			global_page_state(NR_UNSTABLE_NFS) +
 			(inodes_stat.nr_inodes - inodes_stat.nr_unused);
 	while (nr_to_write > 0) {
 		wbc.encountered_congestion = 0;