author     Bjoern Brandenburg <bbb@mpi-sws.org>   2013-06-08 12:22:35 -0400
committer  Namhoon Kim <namhoonk@cs.unc.edu>      2014-10-21 10:03:28 -0400
commit     e44cd07ea0123cac05852b00f3c9d514a8999933 (patch)
tree       e129fcc815cb446e23b4e5ba67629592b4f0fa22 /mm
parent     87c71e1c704021c7381821a6c654096db4f07b20 (diff)
Augment rt_task() with is_realtime()
Whenever the kernel checks for rt_task() to avoid delaying real-time tasks, we want it to also not delay LITMUS^RT tasks. Hence, most calls to rt_task() should be matched by an equivalent call to is_realtime(). Notably, this affects the implementations of select() and nanosleep(), which use timer_slack_ns when setting up timers for non-real-time tasks.
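The select() and nanosleep() changes mentioned above live outside mm/ and are therefore not part of this diffstat. Purely as an illustration of the pattern the patch applies everywhere, the sketch below shows an rt_task() test extended with is_realtime() when deciding how much timer slack a task should get; the helper name task_timer_slack() is hypothetical and not part of this patch.

    #include <linux/sched.h>
    #include <linux/sched/rt.h>     /* rt_task() */
    #include <litmus/litmus.h>      /* is_realtime() */

    /*
     * Hypothetical helper (not part of this patch) illustrating the pattern:
     * real-time tasks -- both POSIX SCHED_FIFO/SCHED_RR tasks (rt_task())
     * and LITMUS^RT tasks (is_realtime()) -- get zero timer slack, so their
     * timers are not coalesced or deferred; all other tasks keep the
     * per-task default stored in timer_slack_ns.
     */
    static unsigned long task_timer_slack(struct task_struct *tsk)
    {
            if (rt_task(tsk) || is_realtime(tsk))
                    return 0;
            return tsk->timer_slack_ns;
    }

The mm/ hunks below follow the same shape: each existing rt_task() check gains an is_realtime() counterpart.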
Diffstat (limited to 'mm')
-rw-r--r--  mm/page-writeback.c  6
-rw-r--r--  mm/page_alloc.c      5
2 files changed, 8 insertions(+), 3 deletions(-)
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index 73cbc5dc150b..1f0073b95d17 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -38,6 +38,8 @@
 #include <linux/sched/rt.h>
 #include <trace/events/writeback.h>
 
+#include <litmus/litmus.h> /* for is_realtime() */
+
 /*
  * Sleep at most 200ms at a time in balance_dirty_pages().
  */
@@ -300,7 +302,7 @@ void global_dirty_limits(unsigned long *pbackground, unsigned long *pdirty)
 	if (background >= dirty)
 		background = dirty / 2;
 	tsk = current;
-	if (tsk->flags & PF_LESS_THROTTLE || rt_task(tsk)) {
+	if (tsk->flags & PF_LESS_THROTTLE || rt_task(tsk) || is_realtime(tsk)) {
 		background += background / 4;
 		dirty += dirty / 4;
 	}
@@ -328,7 +330,7 @@ static unsigned long zone_dirty_limit(struct zone *zone)
 	else
 		dirty = vm_dirty_ratio * zone_memory / 100;
 
-	if (tsk->flags & PF_LESS_THROTTLE || rt_task(tsk))
+	if (tsk->flags & PF_LESS_THROTTLE || rt_task(tsk) || is_realtime(tsk))
 		dirty += dirty / 4;
 
 	return dirty;
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 2ee0fd313f03..65299391e760 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -61,6 +61,8 @@
 #include <linux/hugetlb.h>
 #include <linux/sched/rt.h>
 
+#include <litmus/litmus.h> /* for is_realtime() */
+
 #include <asm/tlbflush.h>
 #include <asm/div64.h>
 #include "internal.h"
@@ -2362,7 +2364,8 @@ gfp_to_alloc_flags(gfp_t gfp_mask)
 	 * See also cpuset_zone_allowed() comment in kernel/cpuset.c.
 	 */
 		alloc_flags &= ~ALLOC_CPUSET;
-	} else if (unlikely(rt_task(current)) && !in_interrupt())
+	} else if (unlikely(rt_task(current) || is_realtime(current))
+			&& !in_interrupt())
 		alloc_flags |= ALLOC_HARDER;
 
 	if (likely(!(gfp_mask & __GFP_NOMEMALLOC))) {