author    Bjoern Brandenburg <bbb@mpi-sws.org>  2015-08-09 07:18:50 -0400
committer Bjoern Brandenburg <bbb@mpi-sws.org>  2017-05-26 17:12:30 -0400
commit    9cd62c8dc5bad42f2de65e9a50fed0fe2608a162 (patch)
tree      fafa75f7f34d0f1533b64795377e6a82b37d5244 /mm
parent    a286e5f00ef48ab8d9b189370441ce90d855b306 (diff)
Augment rt_task() with is_realtime()
Whenever the kernel checks for rt_task() to avoid delaying real-time tasks, we want it to also not delay LITMUS^RT tasks. Hence, most calls to rt_task() should be matched by an equivalent call to is_realtime(). Notably, this affects the implementations of select() and nanosleep(), which use timer_slack_ns when setting up timers for non-real-time tasks.
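For context, the pattern this commit applies can be sketched as follows. The helper effective_timer_slack() below is hypothetical and exists only to illustrate the select()/nanosleep() timer-slack case mentioned above; it assumes that is_realtime() (from litmus/litmus.h) tests whether a task is scheduled by LITMUS^RT, and simply shows how an existing rt_task() check is widened.

#include <linux/sched.h>
#include <litmus/litmus.h>      /* for is_realtime(); assumed LITMUS^RT header */

/*
 * Hypothetical helper, for illustration only: wherever the kernel
 * checks rt_task() to exempt a task from best-effort heuristics such
 * as timer slack, the same exemption is extended to LITMUS^RT tasks
 * via is_realtime().
 */
static u64 effective_timer_slack(struct task_struct *tsk)
{
        if (rt_task(tsk) || is_realtime(tsk))
                return 0;       /* real-time tasks get no timer slack */
        return tsk->timer_slack_ns;
}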
Diffstat (limited to 'mm')
-rw-r--r--  mm/page-writeback.c  6
-rw-r--r--  mm/page_alloc.c      6
2 files changed, 9 insertions(+), 3 deletions(-)
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index 439cc63ad903..9073c0889192 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -41,6 +41,8 @@
 
 #include "internal.h"
 
+#include <litmus/litmus.h> /* for is_realtime() */
+
 /*
  * Sleep at most 200ms at a time in balance_dirty_pages().
  */
@@ -435,7 +437,7 @@ static void domain_dirty_limits(struct dirty_throttle_control *dtc)
 	if (bg_thresh >= thresh)
 		bg_thresh = thresh / 2;
 	tsk = current;
-	if (tsk->flags & PF_LESS_THROTTLE || rt_task(tsk)) {
+	if (tsk->flags & PF_LESS_THROTTLE || rt_task(tsk) || is_realtime(tsk)) {
 		bg_thresh += bg_thresh / 4 + global_wb_domain.dirty_limit / 32;
 		thresh += thresh / 4 + global_wb_domain.dirty_limit / 32;
 	}
@@ -485,7 +487,7 @@ static unsigned long node_dirty_limit(struct pglist_data *pgdat)
 	else
 		dirty = vm_dirty_ratio * node_memory / 100;
 
-	if (tsk->flags & PF_LESS_THROTTLE || rt_task(tsk))
+	if (tsk->flags & PF_LESS_THROTTLE || rt_task(tsk) || is_realtime(tsk))
 		dirty += dirty / 4;
 
 	return dirty;
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 5b06fb385dd7..7c51a38d5d53 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -66,6 +66,9 @@
 #include <linux/memcontrol.h>
 
 #include <asm/sections.h>
+
+#include <litmus/litmus.h> /* for is_realtime() */
+
 #include <asm/tlbflush.h>
 #include <asm/div64.h>
 #include "internal.h"
@@ -3365,7 +3368,8 @@ gfp_to_alloc_flags(gfp_t gfp_mask)
 		 * comment for __cpuset_node_allowed().
 		 */
 		alloc_flags &= ~ALLOC_CPUSET;
-	} else if (unlikely(rt_task(current)) && !in_interrupt())
+	} else if (unlikely(rt_task(current) || is_realtime(current))
+		   && !in_interrupt())
 		alloc_flags |= ALLOC_HARDER;
 
 #ifdef CONFIG_CMA