about summary refs log tree commit diff stats
path: root/mm/page-writeback.c
diff options
context:
space:
mode:
Diffstat (limited to 'mm/page-writeback.c')
-rw-r--r--  mm/page-writeback.c  26
1 file changed, 20 insertions(+), 6 deletions(-)
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index f9d9f5476d58..1d781803e629 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -281,12 +281,13 @@ static inline void task_dirties_fraction(struct task_struct *tsk,
  * effectively curb the growth of dirty pages. Light dirtiers with high enough
  * dirty threshold may never get throttled.
  */
+#define TASK_LIMIT_FRACTION	8
 static unsigned long task_dirty_limit(struct task_struct *tsk,
				       unsigned long bdi_dirty)
 {
	long numerator, denominator;
	unsigned long dirty = bdi_dirty;
-	u64 inv = dirty >> 3;
+	u64 inv = dirty / TASK_LIMIT_FRACTION;
 
	task_dirties_fraction(tsk, &numerator, &denominator);
	inv *= numerator;
@@ -297,6 +298,12 @@ static unsigned long task_dirty_limit(struct task_struct *tsk,
	return max(dirty, bdi_dirty/2);
 }
 
+/* Minimum limit for any task */
+static unsigned long task_min_dirty_limit(unsigned long bdi_dirty)
+{
+	return bdi_dirty - bdi_dirty / TASK_LIMIT_FRACTION;
+}
+
 /*
  *
  */
@@ -651,9 +658,12 @@ static void balance_dirty_pages(struct address_space *mapping,
	unsigned long background_thresh;
	unsigned long dirty_thresh;
	unsigned long bdi_thresh;
+	unsigned long task_bdi_thresh;
+	unsigned long min_task_bdi_thresh;
	unsigned long pages_written = 0;
	unsigned long pause = 1;
	bool dirty_exceeded = false;
+	bool clear_dirty_exceeded = true;
	struct backing_dev_info *bdi = mapping->backing_dev_info;
	unsigned long start_time = jiffies;
 
@@ -673,7 +683,8 @@ static void balance_dirty_pages(struct address_space *mapping,
			break;
 
		bdi_thresh = bdi_dirty_limit(bdi, dirty_thresh);
-		bdi_thresh = task_dirty_limit(current, bdi_thresh);
+		min_task_bdi_thresh = task_min_dirty_limit(bdi_thresh);
+		task_bdi_thresh = task_dirty_limit(current, bdi_thresh);
 
		/*
		 * In order to avoid the stacked BDI deadlock we need
@@ -685,7 +696,7 @@ static void balance_dirty_pages(struct address_space *mapping,
		 * actually dirty; with m+n sitting in the percpu
		 * deltas.
		 */
-		if (bdi_thresh < 2*bdi_stat_error(bdi)) {
+		if (task_bdi_thresh < 2 * bdi_stat_error(bdi)) {
			bdi_nr_reclaimable = bdi_stat_sum(bdi, BDI_RECLAIMABLE);
			bdi_dirty = bdi_nr_reclaimable +
				    bdi_stat_sum(bdi, BDI_WRITEBACK);
@@ -701,8 +712,10 @@ static void balance_dirty_pages(struct address_space *mapping,
		 * bdi or process from holding back light ones; The latter is
		 * the last resort safeguard.
		 */
-		dirty_exceeded = (bdi_dirty > bdi_thresh) ||
+		dirty_exceeded = (bdi_dirty > task_bdi_thresh) ||
				 (nr_dirty > dirty_thresh);
+		clear_dirty_exceeded = (bdi_dirty <= min_task_bdi_thresh) &&
				       (nr_dirty <= dirty_thresh);
 
		if (!dirty_exceeded)
			break;
@@ -723,7 +736,7 @@ static void balance_dirty_pages(struct address_space *mapping,
		 * up.
		 */
		trace_balance_dirty_start(bdi);
-		if (bdi_nr_reclaimable > bdi_thresh) {
+		if (bdi_nr_reclaimable > task_bdi_thresh) {
			pages_written += writeback_inodes_wb(&bdi->wb,
							     write_chunk);
			trace_balance_dirty_written(bdi, pages_written);
@@ -766,7 +779,8 @@ static void balance_dirty_pages(struct address_space *mapping,
			pause = HZ / 10;
		}
 
-		if (!dirty_exceeded && bdi->dirty_exceeded)
+		/* Clear dirty_exceeded flag only when no task can exceed the limit */
+		if (clear_dirty_exceeded && bdi->dirty_exceeded)
			bdi->dirty_exceeded = 0;
 
		if (writeback_in_progress(bdi))