path: root/mm
author    Tejun Heo <tj@kernel.org>    2016-05-27 14:34:46 -0400
committer Jens Axboe <axboe@fb.com>    2016-05-30 10:54:40 -0400
commit    62a584fe05eef1f80ed49a286a29328f1a224fb9 (patch)
tree      cf34bae00e267a9e5fb94a70d7f24f893d325e6b /mm
parent    b02b1fbdd338f70e49efa9ca9899214134526701 (diff)
writeback: use higher precision calculation in domain_dirty_limits()
As vm.dirty_[background_]bytes can't be applied verbatim to multiple cgroup writeback domains, they get converted to percentages in domain_dirty_limits() and applied the same way as vm.dirty_[background_]ratio. However, if the specified bytes are lower than 1% of available memory, the calculated ratios become zero and the writeback domain gets throttled constantly.

Fix it by using per-PAGE_SIZE instead of percentage for the ratio calculations. Also, the updated DIV_ROUND_UP() usages now should yield 1/4096 (0.0244%) as the minimum ratio as long as the specified bytes are above zero.

Signed-off-by: Tejun Heo <tj@kernel.org>
Reported-by: Miao Xie <miaoxie@huawei.com>
Link: http://lkml.kernel.org/g/57333E75.3080309@huawei.com
Cc: stable@vger.kernel.org # v4.2+
Fixes: 9fc3a43e1757 ("writeback: separate out domain_dirty_limits()")
Reviewed-by: Jan Kara <jack@suse.cz>

Adjusted comment based on Jan's suggestion.

Signed-off-by: Jens Axboe <axboe@fb.com>
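To illustrate the precision problem, the following is a minimal user-space sketch (not kernel code) that mirrors the arithmetic of domain_dirty_limits() for a memcg domain. The 16 GiB available-memory figure, the 64 MiB vm.dirty_bytes value, and the local PAGE_SIZE/min/DIV_ROUND_UP definitions are assumptions chosen only to show that the old percentage conversion truncates to zero while the new per-PAGE_SIZE conversion does not.

#include <stdio.h>

#define PAGE_SIZE               4096UL
#define DIV_ROUND_UP(n, d)      (((n) + (d) - 1) / (d))
#define min(a, b)               ((a) < (b) ? (a) : (b))

int main(void)
{
        /* assumed figures: 16 GiB of memory (4M pages), vm.dirty_bytes = 64 MiB */
        unsigned long global_avail = 4UL << 20;         /* pages in the global domain */
        unsigned long available_memory = global_avail;  /* pages in this (memcg) domain */
        unsigned long bytes = 64UL << 20;               /* vm.dirty_bytes, in bytes */

        /* old conversion: integer percentage, truncates to 0 below 1% */
        unsigned long old_ratio = min(DIV_ROUND_UP(bytes, PAGE_SIZE) * 100 /
                                      global_avail, 100UL);
        unsigned long old_thresh = (old_ratio * available_memory) / 100;

        /* new conversion: per-PAGE_SIZE units, minimum 1/4096 (0.0244%) */
        unsigned long new_ratio = min(DIV_ROUND_UP(bytes, global_avail), PAGE_SIZE);
        unsigned long new_thresh = (new_ratio * available_memory) / PAGE_SIZE;

        printf("old: ratio = %lu%%, thresh = %lu pages\n", old_ratio, old_thresh);
        printf("new: ratio = %lu/4096, thresh = %lu pages\n", new_ratio, new_thresh);
        return 0;
}

With these assumed numbers the old conversion prints a 0% ratio and a zero-page threshold (constant throttling), while the new one yields 16/4096 and a 16384-page (64 MiB) threshold.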
Diffstat (limited to 'mm')
-rw-r--r--  mm/page-writeback.c  21
1 file changed, 12 insertions(+), 9 deletions(-)
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index b9956fdee8f5..e2481949494c 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -373,8 +373,9 @@ static void domain_dirty_limits(struct dirty_throttle_control *dtc)
         struct dirty_throttle_control *gdtc = mdtc_gdtc(dtc);
         unsigned long bytes = vm_dirty_bytes;
         unsigned long bg_bytes = dirty_background_bytes;
-        unsigned long ratio = vm_dirty_ratio;
-        unsigned long bg_ratio = dirty_background_ratio;
+        /* convert ratios to per-PAGE_SIZE for higher precision */
+        unsigned long ratio = (vm_dirty_ratio * PAGE_SIZE) / 100;
+        unsigned long bg_ratio = (dirty_background_ratio * PAGE_SIZE) / 100;
         unsigned long thresh;
         unsigned long bg_thresh;
         struct task_struct *tsk;
@@ -386,26 +387,28 @@ static void domain_dirty_limits(struct dirty_throttle_control *dtc)
                 /*
                  * The byte settings can't be applied directly to memcg
                  * domains.  Convert them to ratios by scaling against
-                 * globally available memory.
+                 * globally available memory.  As the ratios are in
+                 * per-PAGE_SIZE, they can be obtained by dividing bytes by
+                 * number of pages.
                  */
                 if (bytes)
-                        ratio = min(DIV_ROUND_UP(bytes, PAGE_SIZE) * 100 /
-                                    global_avail, 100UL);
+                        ratio = min(DIV_ROUND_UP(bytes, global_avail),
+                                    PAGE_SIZE);
                 if (bg_bytes)
-                        bg_ratio = min(DIV_ROUND_UP(bg_bytes, PAGE_SIZE) * 100 /
-                                       global_avail, 100UL);
+                        bg_ratio = min(DIV_ROUND_UP(bg_bytes, global_avail),
+                                       PAGE_SIZE);
                 bytes = bg_bytes = 0;
         }
 
         if (bytes)
                 thresh = DIV_ROUND_UP(bytes, PAGE_SIZE);
         else
-                thresh = (ratio * available_memory) / 100;
+                thresh = (ratio * available_memory) / PAGE_SIZE;
 
         if (bg_bytes)
                 bg_thresh = DIV_ROUND_UP(bg_bytes, PAGE_SIZE);
         else
-                bg_thresh = (bg_ratio * available_memory) / 100;
+                bg_thresh = (bg_ratio * available_memory) / PAGE_SIZE;
 
         if (bg_thresh >= thresh)
                 bg_thresh = thresh / 2;