Diffstat (limited to 'mm/page-writeback.c')
-rw-r--r--	mm/page-writeback.c	24	++++++++++++++++++------
1 files changed, 18 insertions, 6 deletions
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index 3d3848fa6324..5e00f1772c20 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -69,6 +69,12 @@ static inline long sync_writeback_pages(void)
 int dirty_background_ratio = 5;
 
 /*
+ * free highmem will not be subtracted from the total free memory
+ * for calculating free ratios if vm_highmem_is_dirtyable is true
+ */
+int vm_highmem_is_dirtyable;
+
+/*
  * The generator of dirty data starts writeback at this percentage
  */
 int vm_dirty_ratio = 10;
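The knob added above defaults to 0, so the traditional behaviour (highmem excluded from the dirtyable total) is kept unless an administrator flips it. The sysctl plumbing lives outside this file and is therefore not visible in this diffstat-limited view; the sketch below shows roughly how such a flag would be exported through kernel/sysctl.c on a kernel of this vintage. The procname "highmem_is_dirtyable", the table name and the exact entry layout are assumptions for illustration, not taken from this diff.

/*
 * Sketch only: a table entry of this shape would expose the flag as
 * /proc/sys/vm/highmem_is_dirtyable.  The entry actually shipped with the
 * companion sysctl patch may differ in detail.
 */
#include <linux/sysctl.h>

extern int vm_highmem_is_dirtyable;	/* defined in mm/page-writeback.c */

static struct ctl_table vm_highmem_table[] = {
	{
		.ctl_name	= CTL_UNNUMBERED,
		.procname	= "highmem_is_dirtyable",
		.data		= &vm_highmem_is_dirtyable,
		.maxlen		= sizeof(vm_highmem_is_dirtyable),
		.mode		= 0644,
		.proc_handler	= &proc_dointvec,
	},
	{ .ctl_name = 0 }
};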
@@ -219,7 +225,7 @@ static inline void task_dirties_fraction(struct task_struct *tsk,
  *
  * dirty -= (dirty/8) * p_{t}
  */
-void task_dirty_limit(struct task_struct *tsk, long *pdirty)
+static void task_dirty_limit(struct task_struct *tsk, long *pdirty)
 {
 	long numerator, denominator;
 	long dirty = *pdirty;
@@ -287,7 +293,10 @@ static unsigned long determine_dirtyable_memory(void)
 	x = global_page_state(NR_FREE_PAGES)
 		+ global_page_state(NR_INACTIVE)
 		+ global_page_state(NR_ACTIVE);
-	x -= highmem_dirtyable_memory(x);
+
+	if (!vm_highmem_is_dirtyable)
+		x -= highmem_dirtyable_memory(x);
+
 	return x + 1;	/* Ensure that we never return 0 */
 }
 
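Reassembled from the hunk's context and added lines, determine_dirtyable_memory() now reads as below; the only behavioural change is that the highmem discount is skipped when the new flag is set. The local declaration above the hunk window is assumed unchanged.

static unsigned long determine_dirtyable_memory(void)
{
	unsigned long x;

	x = global_page_state(NR_FREE_PAGES)
		+ global_page_state(NR_INACTIVE)
		+ global_page_state(NR_ACTIVE);

	/* Discount highmem only when the knob is left at its default of 0. */
	if (!vm_highmem_is_dirtyable)
		x -= highmem_dirtyable_memory(x);

	return x + 1;	/* Ensure that we never return 0 */
}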
@@ -558,6 +567,7 @@ static void background_writeout(unsigned long _min_pages)
 			global_page_state(NR_UNSTABLE_NFS) < background_thresh
 				&& min_pages <= 0)
 			break;
+		wbc.more_io = 0;
 		wbc.encountered_congestion = 0;
 		wbc.nr_to_write = MAX_WRITEBACK_PAGES;
 		wbc.pages_skipped = 0;
@@ -565,8 +575,9 @@ static void background_writeout(unsigned long _min_pages)
 		min_pages -= MAX_WRITEBACK_PAGES - wbc.nr_to_write;
 		if (wbc.nr_to_write > 0 || wbc.pages_skipped > 0) {
 			/* Wrote less than expected */
-			congestion_wait(WRITE, HZ/10);
-			if (!wbc.encountered_congestion)
+			if (wbc.encountered_congestion || wbc.more_io)
+				congestion_wait(WRITE, HZ/10);
+			else
 				break;
 		}
 	}
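The change in background_writeout() is as much about ordering as about the new flag: the old tail always slept in congestion_wait() and only then decided whether to give up, whereas the new tail sleeps only when another pass can plausibly make progress (the queue was congested, or writeback left inodes with more dirty pages, signalled by more_io) and otherwise breaks out immediately. Side by side, assembled from the hunk:

/* Before: sleep unconditionally, then stop unless the device was congested. */
if (wbc.nr_to_write > 0 || wbc.pages_skipped > 0) {
	/* Wrote less than expected */
	congestion_wait(WRITE, HZ/10);
	if (!wbc.encountered_congestion)
		break;
}

/* After: sleep and retry only when there is a reason to expect progress. */
if (wbc.nr_to_write > 0 || wbc.pages_skipped > 0) {
	/* Wrote less than expected */
	if (wbc.encountered_congestion || wbc.more_io)
		congestion_wait(WRITE, HZ/10);
	else
		break;
}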
@@ -631,11 +642,12 @@ static void wb_kupdate(unsigned long arg)
 			global_page_state(NR_UNSTABLE_NFS) +
 			(inodes_stat.nr_inodes - inodes_stat.nr_unused);
 	while (nr_to_write > 0) {
+		wbc.more_io = 0;
 		wbc.encountered_congestion = 0;
 		wbc.nr_to_write = MAX_WRITEBACK_PAGES;
 		writeback_inodes(&wbc);
 		if (wbc.nr_to_write > 0) {
-			if (wbc.encountered_congestion)
+			if (wbc.encountered_congestion || wbc.more_io)
 				congestion_wait(WRITE, HZ/10);
 			else
 				break;	/* All the old data is written */
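wb_kupdate() now applies the same test before waiting. Purely as an illustration of the shared policy (the patch introduces no such helper), the decision both loops make could be written as:

#include <linux/writeback.h>

/*
 * Hypothetical helper, not part of the patch: a writeback loop should sleep
 * in congestion_wait() and run another pass only if the previous pass hit a
 * congested queue or left inodes with further dirty pages (wbc->more_io);
 * otherwise another pass cannot write anything more and the loop should stop.
 */
static inline int writeback_pass_should_retry(const struct writeback_control *wbc)
{
	return wbc->encountered_congestion || wbc->more_io;
}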
@@ -1064,7 +1076,7 @@ static int __set_page_dirty(struct page *page)
 	return 0;
 }
 
-int fastcall set_page_dirty(struct page *page)
+int set_page_dirty(struct page *page)
 {
 	int ret = __set_page_dirty(page);
 	if (ret)