path: root/mm/page-writeback.c
author	Andrew Morton <akpm@osdl.org>	2006-10-20 02:28:16 -0400
committer	Linus Torvalds <torvalds@g5.osdl.org>	2006-10-20 13:26:35 -0400
commit	3fcfab16c5b86eaa3db3a9a31adba550c5b67141 (patch)
tree	bd348fa081b8fbec2c79fbf8f173a306d70b2b2c	/mm/page-writeback.c
parent	79e2de4bc53d7ca2a8eedee49e4a92479b4b530e (diff)
[PATCH] separate bdi congestion functions from queue congestion functions
Separate out the concept of "queue congestion" from "backing-dev congestion".
Congestion is a backing-dev concept, not a queue concept.

The blk_* congestion functions are retained, as wrappers around the core
backing-dev congestion functions.

This proper layering is needed so that NFS can cleanly use the congestion
functions, and so that CONFIG_BLOCK=n actually links.

Cc: "Thomas Maier" <balagi@justmail.de>
Cc: "Jens Axboe" <jens.axboe@oracle.com>
Cc: Trond Myklebust <trond.myklebust@fys.uio.no>
Cc: David Howells <dhowells@redhat.com>
Cc: Peter Osterlund <petero2@telia.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
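In code terms, the layering described above means the retained blk_* helpers become thin wrappers that forward to the backing-dev primitives. A minimal sketch of that relationship follows; the exact prototypes and the file they land in are outside this diff and are assumptions here:

/* Core backing-dev congestion primitives (assumed prototypes). */
long congestion_wait(int rw, long timeout);
void congestion_end(int rw);

/* Queue-level wrappers retained for existing callers (sketch only). */
long blk_congestion_wait(int rw, long timeout)
{
	/* Delegate to the backing-dev layer; no queue-specific state needed. */
	return congestion_wait(rw, timeout);
}

void blk_congestion_end(int rw)
{
	congestion_end(rw);
}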
Diffstat (limited to 'mm/page-writeback.c')
-rw-r--r--	mm/page-writeback.c	17
1 file changed, 4 insertions(+), 13 deletions(-)
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index a0f339057449..8d9b19f239c3 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -222,7 +222,7 @@ static void balance_dirty_pages(struct address_space *mapping)
 			if (pages_written >= write_chunk)
 				break;		/* We've done our duty */
 		}
-		blk_congestion_wait(WRITE, HZ/10);
+		congestion_wait(WRITE, HZ/10);
 	}
 
 	if (nr_reclaimable + global_page_state(NR_WRITEBACK)
@@ -314,7 +314,7 @@ void throttle_vm_writeout(void)
 		if (global_page_state(NR_UNSTABLE_NFS) +
 			global_page_state(NR_WRITEBACK) <= dirty_thresh)
 				break;
-		blk_congestion_wait(WRITE, HZ/10);
+		congestion_wait(WRITE, HZ/10);
 	}
 }
 
@@ -351,7 +351,7 @@ static void background_writeout(unsigned long _min_pages)
 		min_pages -= MAX_WRITEBACK_PAGES - wbc.nr_to_write;
 		if (wbc.nr_to_write > 0 || wbc.pages_skipped > 0) {
 			/* Wrote less than expected */
-			blk_congestion_wait(WRITE, HZ/10);
+			congestion_wait(WRITE, HZ/10);
 			if (!wbc.encountered_congestion)
 				break;
 		}
@@ -422,7 +422,7 @@ static void wb_kupdate(unsigned long arg)
 		writeback_inodes(&wbc);
 		if (wbc.nr_to_write > 0) {
 			if (wbc.encountered_congestion)
-				blk_congestion_wait(WRITE, HZ/10);
+				congestion_wait(WRITE, HZ/10);
 			else
 				break;	/* All the old data is written */
 		}
@@ -956,15 +956,6 @@ int test_set_page_writeback(struct page *page)
 EXPORT_SYMBOL(test_set_page_writeback);
 
 /*
- * Wakes up tasks that are being throttled due to writeback congestion
- */
-void writeback_congestion_end(void)
-{
-	blk_congestion_end(WRITE);
-}
-EXPORT_SYMBOL(writeback_congestion_end);
-
-/*
  * Return true if any of the pages in the mapping are marged with the
  * passed tag.
  */
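For orientation, the congestion_wait() that the hunks above switch to, and the backing-dev counterpart of the removed writeback_congestion_end(), live outside this file. A rough sketch of what such core functions can look like, assuming a per-direction waitqueue and a timed uninterruptible wait; this is an illustration of the layering, not the code from this commit:

#include <linux/sched.h>
#include <linux/wait.h>

/* Hypothetical sketch: one waitqueue per I/O direction (READ/WRITE). */
static wait_queue_head_t congestion_wqh[2] = {
	__WAIT_QUEUE_HEAD_INITIALIZER(congestion_wqh[0]),
	__WAIT_QUEUE_HEAD_INITIALIZER(congestion_wqh[1])
};

/* Sleep until woken by congestion_end() or until the timeout expires. */
long congestion_wait(int rw, long timeout)
{
	long ret;
	DEFINE_WAIT(wait);
	wait_queue_head_t *wqh = &congestion_wqh[rw];

	prepare_to_wait(wqh, &wait, TASK_UNINTERRUPTIBLE);
	ret = io_schedule_timeout(timeout);
	finish_wait(wqh, &wait);
	return ret;
}

/* Wake tasks throttled in congestion_wait() for the given direction. */
void congestion_end(int rw)
{
	wait_queue_head_t *wqh = &congestion_wqh[rw];

	if (waitqueue_active(wqh))
		wake_up(wqh);
}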