author	Tejun Heo <tj@kernel.org>	2015-05-22 18:23:22 -0400
committer	Jens Axboe <axboe@fb.com>	2015-06-02 10:38:12 -0400
commit	dcc25ae76eb7b8ff883eaaab57e30e8f2f085be3 (patch)
tree	7fb1d01278ad2b16a1c21ead3e567a3bb7d00c25 /mm/page-writeback.c
parent	380c27ca33ebecc9da35aa90c8b3a9154f90aac2 (diff)
writeback: move global_dirty_limit into wb_domain
This patch is a part of the series to define wb_domain which represents a
domain that wb's (bdi_writeback's) belong to and are measured against each
other in. This will enable IO backpressure propagation for cgroup writeback.

global_dirty_limit exists to regulate the global dirty threshold which is a
property of the wb_domain. This patch moves hard_dirty_limit, dirty_lock,
and update_time into wb_domain.

This is pure reorganization and doesn't introduce any behavioral changes.

Signed-off-by: Tejun Heo <tj@kernel.org>
Cc: Jens Axboe <axboe@kernel.dk>
Cc: Jan Kara <jack@suse.cz>
Cc: Wu Fengguang <fengguang.wu@intel.com>
Cc: Greg Thelen <gthelen@google.com>
Signed-off-by: Jens Axboe <axboe@fb.com>
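For orientation, this is roughly the struct wb_domain the hunks below poke at. The authoritative definition was added to include/linux/writeback.h by the parent commit; this sketch is assembled from the identifiers visible in the diff, so the exact layout may differ:

struct wb_domain {
	spinlock_t lock;		/* absorbs the old static dirty_lock */

	/* writeout completion tracking, from the parent commit */
	struct fprop_global completions;
	struct timer_list period_timer;	/* aging timer for writeout fractions */
	unsigned long period_time;

	/* moved by this patch:
	 *   global_dirty_limit -> dirty_limit
	 *   update_time        -> dirty_limit_tstamp
	 */
	unsigned long dirty_limit_tstamp;
	unsigned long dirty_limit;
};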
Diffstat (limited to 'mm/page-writeback.c')
-rw-r--r--	mm/page-writeback.c	46
1 file changed, 23 insertions, 23 deletions
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index 08e1737edb39..27e60ba8e688 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -122,9 +122,7 @@ EXPORT_SYMBOL(laptop_mode);
 
 /* End of sysctl-exported parameters */
 
-unsigned long global_dirty_limit;
-
-static struct wb_domain global_wb_domain;
+struct wb_domain global_wb_domain;
 
 /*
  * Length of period for aging writeout fractions of bdis. This is an
@@ -470,9 +468,15 @@ static void writeout_period(unsigned long t)
 int wb_domain_init(struct wb_domain *dom, gfp_t gfp)
 {
 	memset(dom, 0, sizeof(*dom));
+
+	spin_lock_init(&dom->lock);
+
 	init_timer_deferrable(&dom->period_timer);
 	dom->period_timer.function = writeout_period;
 	dom->period_timer.data = (unsigned long)dom;
+
+	dom->dirty_limit_tstamp = jiffies;
+
 	return fprop_global_init(&dom->completions, gfp);
 }
 
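As a usage note, the global domain is initialized exactly once at boot. A hedged reconstruction of page_writeback_init() in this era of the tree (not part of this patch):

void __init page_writeback_init(void)
{
	writeback_set_ratelimit();
	register_cpu_notifier(&ratelimit_nb);

	/* the domain must be up before any dirtying or writeback runs */
	BUG_ON(wb_domain_init(&global_wb_domain, GFP_KERNEL));
}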
@@ -532,7 +536,9 @@ static unsigned long dirty_freerun_ceiling(unsigned long thresh,
 
 static unsigned long hard_dirty_limit(unsigned long thresh)
 {
-	return max(thresh, global_dirty_limit);
+	struct wb_domain *dom = &global_wb_domain;
+
+	return max(thresh, dom->dirty_limit);
 }
 
 /**
@@ -916,17 +922,10 @@ out:
 	wb->avg_write_bandwidth = avg;
 }
 
-/*
- * The global dirtyable memory and dirty threshold could be suddenly knocked
- * down by a large amount (eg. on the startup of KVM in a swapless system).
- * This may throw the system into deep dirty exceeded state and throttle
- * heavy/light dirtiers alike. To retain good responsiveness, maintain
- * global_dirty_limit for tracking slowly down to the knocked down dirty
- * threshold.
- */
 static void update_dirty_limit(unsigned long thresh, unsigned long dirty)
 {
-	unsigned long limit = global_dirty_limit;
+	struct wb_domain *dom = &global_wb_domain;
+	unsigned long limit = dom->dirty_limit;
 
 	/*
 	 * Follow up in one step.
@@ -939,7 +938,7 @@ static void update_dirty_limit(unsigned long thresh, unsigned long dirty)
 	/*
 	 * Follow down slowly. Use the higher one as the target, because thresh
 	 * may drop below dirty. This is exactly the reason to introduce
-	 * global_dirty_limit which is guaranteed to lie above the dirty pages.
+	 * dom->dirty_limit which is guaranteed to lie above the dirty pages.
 	 */
 	thresh = max(thresh, dirty);
 	if (limit > thresh) {
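A quick worked example of the "follow down slowly" step. The decrement itself sits just below this hunk and is untouched by the patch; in this version of the file it is limit -= (limit - thresh) >> 5, i.e. 1/32 of the gap per update. If the limit is 10000 pages and the threshold is suddenly knocked down to 2000, the first update yields 10000 - (8000 >> 5) = 9750, the next 9750 - (7750 >> 5) = 9508, and so on; the limit glides toward the new threshold instead of snapping to it, so hard_dirty_limit() does not abruptly throttle every dirtier.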
@@ -948,28 +947,27 @@ static void update_dirty_limit(unsigned long thresh, unsigned long dirty)
 	}
 	return;
 update:
-	global_dirty_limit = limit;
+	dom->dirty_limit = limit;
 }
 
 static void global_update_bandwidth(unsigned long thresh,
 				    unsigned long dirty,
 				    unsigned long now)
 {
-	static DEFINE_SPINLOCK(dirty_lock);
-	static unsigned long update_time = INITIAL_JIFFIES;
+	struct wb_domain *dom = &global_wb_domain;
 
 	/*
 	 * check locklessly first to optimize away locking for the most time
 	 */
-	if (time_before(now, update_time + BANDWIDTH_INTERVAL))
+	if (time_before(now, dom->dirty_limit_tstamp + BANDWIDTH_INTERVAL))
 		return;
 
-	spin_lock(&dirty_lock);
-	if (time_after_eq(now, update_time + BANDWIDTH_INTERVAL)) {
+	spin_lock(&dom->lock);
+	if (time_after_eq(now, dom->dirty_limit_tstamp + BANDWIDTH_INTERVAL)) {
 		update_dirty_limit(thresh, dirty);
-		update_time = now;
+		dom->dirty_limit_tstamp = now;
 	}
-	spin_unlock(&dirty_lock);
+	spin_unlock(&dom->lock);
 }
 
 /*
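The locking pattern above is worth calling out: a lockless staleness check filters the common case, and the same check is repeated under dom->lock so that only one contender performs the update. A minimal user-space C analogue of that pattern, with hypothetical names, pthreads in place of a kernel spinlock, and plain seconds in place of jiffies:

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <time.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static _Atomic long last_update;	/* stands in for dom->dirty_limit_tstamp */
#define INTERVAL 1			/* stands in for BANDWIDTH_INTERVAL */

static void maybe_update(long now)
{
	/* lockless fast path: most callers return here without locking */
	if (now < atomic_load(&last_update) + INTERVAL)
		return;

	pthread_mutex_lock(&lock);
	/* re-check: another thread may have updated while we waited */
	if (now >= atomic_load(&last_update) + INTERVAL) {
		printf("updating at %ld\n", now);	/* the expensive work */
		atomic_store(&last_update, now);
	}
	pthread_mutex_unlock(&lock);
}

int main(void)
{
	long now = time(NULL);

	maybe_update(now);	/* performs the update */
	maybe_update(now);	/* fast path: nothing to do */
	return 0;
}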
@@ -1761,10 +1759,12 @@ void laptop_sync_completion(void)
 
 void writeback_set_ratelimit(void)
 {
+	struct wb_domain *dom = &global_wb_domain;
 	unsigned long background_thresh;
 	unsigned long dirty_thresh;
+
 	global_dirty_limits(&background_thresh, &dirty_thresh);
-	global_dirty_limit = dirty_thresh;
+	dom->dirty_limit = dirty_thresh;
 	ratelimit_pages = dirty_thresh / (num_online_cpus() * 32);
 	if (ratelimit_pages < 16)
 		ratelimit_pages = 16;
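To make the last computation concrete: with dirty_thresh = 51200 pages (200 MB at 4 KiB pages) on a 4-CPU machine, ratelimit_pages = 51200 / (4 * 32) = 400, so roughly speaking each CPU may dirty a few hundred pages before balance_dirty_pages() is invoked again; the floor of 16 keeps the check frequency reasonable when the threshold is very small.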