about | summary | refs | log | tree | commit | diff | stats
path: root/mm/page-writeback.c
diff options
context:
space:
mode:
author: David Rientjes <rientjes@google.com> 2009-01-06 17:39:29 -0500
committer: Linus Torvalds <torvalds@linux-foundation.org> 2009-01-06 18:59:02 -0500
commit: 364aeb2849789b51bf4b9af2ddd02fee7285c54e (patch)
tree: 4a24ea43a2a76ae744571f3c7b5022aa1107599e /mm/page-writeback.c
parent: 58a01a45721bf7bd3a41a86248c3cb02a6b0c501 (diff)
mm: change dirty limit type specifiers to unsigned long
The background dirty and dirty limits are better defined with type specifiers of unsigned long since negative writeback thresholds are not possible. These values, as returned by get_dirty_limits(), are normally compared with ZVC values to determine whether writeback shall commence or be throttled. Such page counts cannot be negative, so declaring the page limits as signed is unnecessary. Acked-by: Peter Zijlstra <peterz@infradead.org> Cc: Dave Chinner <david@fromorbit.com> Cc: Christoph Lameter <cl@linux-foundation.org> Signed-off-by: David Rientjes <rientjes@google.com> Cc: Andrea Righi <righi.andrea@gmail.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/page-writeback.c')
-rw-r--r--  mm/page-writeback.c | 22
1 file changed, 11 insertions(+), 11 deletions(-)
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index 08d2b960b294..4d4074cff300 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -362,13 +362,13 @@ unsigned long determine_dirtyable_memory(void)
 }
 
 void
-get_dirty_limits(long *pbackground, long *pdirty, long *pbdi_dirty,
-		 struct backing_dev_info *bdi)
+get_dirty_limits(unsigned long *pbackground, unsigned long *pdirty,
+		 unsigned long *pbdi_dirty, struct backing_dev_info *bdi)
 {
 	int background_ratio;		/* Percentages */
 	int dirty_ratio;
-	long background;
-	long dirty;
+	unsigned long background;
+	unsigned long dirty;
 	unsigned long available_memory = determine_dirtyable_memory();
 	struct task_struct *tsk;
 
@@ -423,9 +423,9 @@ static void balance_dirty_pages(struct address_space *mapping)
 {
 	long nr_reclaimable, bdi_nr_reclaimable;
 	long nr_writeback, bdi_nr_writeback;
-	long background_thresh;
-	long dirty_thresh;
-	long bdi_thresh;
+	unsigned long background_thresh;
+	unsigned long dirty_thresh;
+	unsigned long bdi_thresh;
 	unsigned long pages_written = 0;
 	unsigned long write_chunk = sync_writeback_pages();
 
@@ -580,8 +580,8 @@ EXPORT_SYMBOL(balance_dirty_pages_ratelimited_nr);
 
 void throttle_vm_writeout(gfp_t gfp_mask)
 {
-	long background_thresh;
-	long dirty_thresh;
+	unsigned long background_thresh;
+	unsigned long dirty_thresh;
 
 	for ( ; ; ) {
 		get_dirty_limits(&background_thresh, &dirty_thresh, NULL, NULL);
@@ -624,8 +624,8 @@ static void background_writeout(unsigned long _min_pages)
 	};
 
 	for ( ; ; ) {
-		long background_thresh;
-		long dirty_thresh;
+		unsigned long background_thresh;
+		unsigned long dirty_thresh;
 
 		get_dirty_limits(&background_thresh, &dirty_thresh, NULL, NULL);
 		if (global_page_state(NR_FILE_DIRTY) +