author     Greg Kroah-Hartman <gregkh@linuxfoundation.org>  2015-04-07 05:03:02 -0400
committer  Greg Kroah-Hartman <gregkh@linuxfoundation.org>  2015-04-07 05:03:02 -0400
commit     c610f7f772aa06ae2bd8e5ace87cde4d90f70198 (patch)
tree       6991a32acd3d63685c184734869877707fa73d1c  /mm/page-writeback.c
parent     f9541f8239a5765595543b021b2dce6a8d6653ab (diff)
parent     f22e6e847115abc3a0e2ad7bb18d243d42275af1 (diff)
Merge 4.0-rc7 into staging-next
We want those fixes (iio primarily) in the -next branch to help with merge and testing issues.

Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Diffstat (limited to 'mm/page-writeback.c')
-rw-r--r--  mm/page-writeback.c  7
1 file changed, 5 insertions(+), 2 deletions(-)
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index 45e187b2d971..644bcb665773 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -857,8 +857,11 @@ static void bdi_update_write_bandwidth(struct backing_dev_info *bdi,
 	 *                   bw * elapsed + write_bandwidth * (period - elapsed)
 	 * write_bandwidth = ---------------------------------------------------
 	 *                                          period
+	 *
+	 * @written may have decreased due to account_page_redirty().
+	 * Avoid underflowing @bw calculation.
 	 */
-	bw = written - bdi->written_stamp;
+	bw = written - min(written, bdi->written_stamp);
 	bw *= HZ;
 	if (unlikely(elapsed > period)) {
 		do_div(bw, elapsed);
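
The hunk above guards an unsigned subtraction: per the new comment, @written may have gone backwards since the last stamp because of account_page_redirty(), and without the clamp the difference wraps around to an enormous bandwidth estimate. A minimal user-space sketch of the effect, with hypothetical counter values and the kernel's blending over a full period left out:

#include <stdio.h>

#define MIN(a, b)	((a) < (b) ? (a) : (b))
#define HZ		100UL

int main(void)
{
	/* Hypothetical counters: the stamp was taken when the written count
	 * was higher than it is now (the redirty case the comment describes). */
	unsigned long written = 1000;
	unsigned long written_stamp = 1020;
	unsigned long elapsed = 50;		/* jiffies since the last update */

	/* Old code: unsigned subtraction wraps to a huge value. */
	unsigned long bw_old = (written - written_stamp) * HZ / elapsed;

	/* Fixed code: clamp so the difference can never go "negative". */
	unsigned long bw_new = (written - MIN(written, written_stamp)) * HZ / elapsed;

	printf("old estimate: %lu pages/s (wrapped)\n", bw_old);
	printf("new estimate: %lu pages/s\n", bw_new);
	return 0;
}

With the clamp the estimate for that interval simply degrades to zero instead of blowing up the running write_bandwidth average.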
@@ -922,7 +925,7 @@ static void global_update_bandwidth(unsigned long thresh,
 					    unsigned long now)
 {
 	static DEFINE_SPINLOCK(dirty_lock);
-	static unsigned long update_time;
+	static unsigned long update_time = INITIAL_JIFFIES;
 
 	/*
 	 * check locklessly first to optimize away locking for the most time
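
The second hunk initializes the static timestamp to INITIAL_JIFFIES rather than leaving it at zero. jiffies itself starts at INITIAL_JIFFIES, about five minutes before the counter wraps, so a zero timestamp lives on a different epoch and the wrap-safe "check locklessly first" comparison can treat every call as "too early" until jiffies wraps past it. A rough user-space sketch under stated assumptions (32-bit jiffies arithmetic as on a 32-bit kernel, HZ=100, and a time_before()-style gate against update_time + BANDWIDTH_INTERVAL modelled on the surrounding code, not copied from it):

#include <stdint.h>
#include <stdio.h>

#define HZ			100U
/* Kernel trick: start jiffies ~5 minutes before the counter wraps. */
#define INITIAL_JIFFIES		((uint32_t)(-300 * (int32_t)HZ))
#define BANDWIDTH_INTERVAL	(HZ / 5)

/* Simplified 32-bit time_before(): true if a is earlier than b, wrap-safe. */
static int time_before(uint32_t a, uint32_t b)
{
	return (int32_t)(a - b) < 0;
}

int main(void)
{
	uint32_t now = INITIAL_JIFFIES + 10 * HZ;	/* ~10 seconds after boot */

	uint32_t update_time_old = 0;			/* old: implicit zero */
	uint32_t update_time_new = INITIAL_JIFFIES;	/* new: jiffies' epoch */

	/* With the zero-initialized stamp, "too early, skip the update" stays
	 * true until jiffies wraps past it, roughly 5 minutes after boot. */
	printf("skip update, old init: %d\n",
	       time_before(now, update_time_old + BANDWIDTH_INTERVAL));
	printf("skip update, new init: %d\n",
	       time_before(now, update_time_new + BANDWIDTH_INTERVAL));
	return 0;
}

Seeding the timestamp with INITIAL_JIFFIES keeps it on the same epoch as jiffies, so the very first interval check behaves the same shortly after boot as it does later on.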