aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorBjoern Brandenburg <bbb@mpi-sws.org>2015-08-09 07:18:50 -0400
committerBjoern Brandenburg <bbb@mpi-sws.org>2017-05-26 17:12:30 -0400
commit9cd62c8dc5bad42f2de65e9a50fed0fe2608a162 (patch)
treefafa75f7f34d0f1533b64795377e6a82b37d5244
parenta286e5f00ef48ab8d9b189370441ce90d855b306 (diff)
Augment rt_task() with is_realtime()
Whenever the kernel checks rt_task() to avoid delaying real-time tasks, we also want it to avoid delaying LITMUS^RT tasks. Hence, most calls to rt_task() should be matched by an equivalent call to is_realtime(). Notably, this affects the implementations of select() and nanosleep(), which use timer_slack_ns when setting up timers for non-real-time tasks.
-rw-r--r--fs/select.c4
-rw-r--r--kernel/locking/mutex.c4
-rw-r--r--kernel/locking/rwsem-xadd.c4
-rw-r--r--kernel/time/hrtimer.c4
-rw-r--r--mm/page-writeback.c6
-rw-r--r--mm/page_alloc.c6
6 files changed, 21 insertions, 7 deletions
diff --git a/fs/select.c b/fs/select.c
index 3d4f85defeab..a61843335182 100644
--- a/fs/select.c
+++ b/fs/select.c
@@ -31,6 +31,8 @@
31#include <net/busy_poll.h> 31#include <net/busy_poll.h>
32#include <linux/vmalloc.h> 32#include <linux/vmalloc.h>
33 33
34#include <litmus/litmus.h> /* for is_realtime() */
35
34#include <asm/uaccess.h> 36#include <asm/uaccess.h>
35 37
36 38
@@ -80,7 +82,7 @@ u64 select_estimate_accuracy(struct timespec64 *tv)
80 * Realtime tasks get a slack of 0 for obvious reasons. 82 * Realtime tasks get a slack of 0 for obvious reasons.
81 */ 83 */
82 84
83 if (rt_task(current)) 85 if (rt_task(current) || is_realtime(current))
84 return 0; 86 return 0;
85 87
86 ktime_get_ts64(&now); 88 ktime_get_ts64(&now);
diff --git a/kernel/locking/mutex.c b/kernel/locking/mutex.c
index a70b90db3909..cfb3552caa6e 100644
--- a/kernel/locking/mutex.c
+++ b/kernel/locking/mutex.c
@@ -27,6 +27,8 @@
27#include <linux/debug_locks.h> 27#include <linux/debug_locks.h>
28#include <linux/osq_lock.h> 28#include <linux/osq_lock.h>
29 29
30#include <litmus/litmus.h> /* for is_realtime() */
31
30/* 32/*
31 * In the DEBUG case we are using the "NULL fastpath" for mutexes, 33 * In the DEBUG case we are using the "NULL fastpath" for mutexes,
32 * which forces all calls into the slowpath: 34 * which forces all calls into the slowpath:
@@ -368,7 +370,7 @@ static bool mutex_optimistic_spin(struct mutex *lock,
368 * we're an RT task that will live-lock because we won't let 370 * we're an RT task that will live-lock because we won't let
369 * the owner complete. 371 * the owner complete.
370 */ 372 */
371 if (!owner && (need_resched() || rt_task(task))) 373 if (!owner && (need_resched() || rt_task(task) || is_realtime(task)))
372 break; 374 break;
373 375
374 /* 376 /*
diff --git a/kernel/locking/rwsem-xadd.c b/kernel/locking/rwsem-xadd.c
index 2337b4bb2366..2badbb915c53 100644
--- a/kernel/locking/rwsem-xadd.c
+++ b/kernel/locking/rwsem-xadd.c
@@ -18,6 +18,8 @@
18 18
19#include "rwsem.h" 19#include "rwsem.h"
20 20
21#include <litmus/litmus.h> /* for is_realtime() */
22
21/* 23/*
22 * Guide to the rw_semaphore's count field for common values. 24 * Guide to the rw_semaphore's count field for common values.
23 * (32-bit case illustrated, similar for 64-bit) 25 * (32-bit case illustrated, similar for 64-bit)
@@ -414,7 +416,7 @@ static bool rwsem_optimistic_spin(struct rw_semaphore *sem)
414 * we're an RT task that will live-lock because we won't let 416 * we're an RT task that will live-lock because we won't let
415 * the owner complete. 417 * the owner complete.
416 */ 418 */
417 if (!sem->owner && (need_resched() || rt_task(current))) 419 if (!sem->owner && (need_resched() || rt_task(current) || is_realtime(current)))
418 break; 420 break;
419 421
420 /* 422 /*
diff --git a/kernel/time/hrtimer.c b/kernel/time/hrtimer.c
index bb5ec425dfe0..49da37510d78 100644
--- a/kernel/time/hrtimer.c
+++ b/kernel/time/hrtimer.c
@@ -56,6 +56,8 @@
56 56
57#include "tick-internal.h" 57#include "tick-internal.h"
58 58
59#include <litmus/litmus.h> /* for is_realtime() */
60
59/* 61/*
60 * The timer bases: 62 * The timer bases:
61 * 63 *
@@ -1541,7 +1543,7 @@ long hrtimer_nanosleep(struct timespec *rqtp, struct timespec __user *rmtp,
1541 u64 slack; 1543 u64 slack;
1542 1544
1543 slack = current->timer_slack_ns; 1545 slack = current->timer_slack_ns;
1544 if (dl_task(current) || rt_task(current)) 1546 if (dl_task(current) || rt_task(current) || is_realtime(current))
1545 slack = 0; 1547 slack = 0;
1546 1548
1547 hrtimer_init_on_stack(&t.timer, clockid, mode); 1549 hrtimer_init_on_stack(&t.timer, clockid, mode);
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index 439cc63ad903..9073c0889192 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -41,6 +41,8 @@
41 41
42#include "internal.h" 42#include "internal.h"
43 43
44#include <litmus/litmus.h> /* for is_realtime() */
45
44/* 46/*
45 * Sleep at most 200ms at a time in balance_dirty_pages(). 47 * Sleep at most 200ms at a time in balance_dirty_pages().
46 */ 48 */
@@ -435,7 +437,7 @@ static void domain_dirty_limits(struct dirty_throttle_control *dtc)
435 if (bg_thresh >= thresh) 437 if (bg_thresh >= thresh)
436 bg_thresh = thresh / 2; 438 bg_thresh = thresh / 2;
437 tsk = current; 439 tsk = current;
438 if (tsk->flags & PF_LESS_THROTTLE || rt_task(tsk)) { 440 if (tsk->flags & PF_LESS_THROTTLE || rt_task(tsk) || is_realtime(tsk)) {
439 bg_thresh += bg_thresh / 4 + global_wb_domain.dirty_limit / 32; 441 bg_thresh += bg_thresh / 4 + global_wb_domain.dirty_limit / 32;
440 thresh += thresh / 4 + global_wb_domain.dirty_limit / 32; 442 thresh += thresh / 4 + global_wb_domain.dirty_limit / 32;
441 } 443 }
@@ -485,7 +487,7 @@ static unsigned long node_dirty_limit(struct pglist_data *pgdat)
485 else 487 else
486 dirty = vm_dirty_ratio * node_memory / 100; 488 dirty = vm_dirty_ratio * node_memory / 100;
487 489
488 if (tsk->flags & PF_LESS_THROTTLE || rt_task(tsk)) 490 if (tsk->flags & PF_LESS_THROTTLE || rt_task(tsk) || is_realtime(tsk))
489 dirty += dirty / 4; 491 dirty += dirty / 4;
490 492
491 return dirty; 493 return dirty;
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 5b06fb385dd7..7c51a38d5d53 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -66,6 +66,9 @@
66#include <linux/memcontrol.h> 66#include <linux/memcontrol.h>
67 67
68#include <asm/sections.h> 68#include <asm/sections.h>
69
70#include <litmus/litmus.h> /* for is_realtime() */
71
69#include <asm/tlbflush.h> 72#include <asm/tlbflush.h>
70#include <asm/div64.h> 73#include <asm/div64.h>
71#include "internal.h" 74#include "internal.h"
@@ -3365,7 +3368,8 @@ gfp_to_alloc_flags(gfp_t gfp_mask)
3365 * comment for __cpuset_node_allowed(). 3368 * comment for __cpuset_node_allowed().
3366 */ 3369 */
3367 alloc_flags &= ~ALLOC_CPUSET; 3370 alloc_flags &= ~ALLOC_CPUSET;
3368 } else if (unlikely(rt_task(current)) && !in_interrupt()) 3371 } else if (unlikely(rt_task(current) || is_realtime(current))
3372 && !in_interrupt())
3369 alloc_flags |= ALLOC_HARDER; 3373 alloc_flags |= ALLOC_HARDER;
3370 3374
3371#ifdef CONFIG_CMA 3375#ifdef CONFIG_CMA