author     Bjoern Brandenburg <bbb@mpi-sws.org>    2015-08-09 07:18:50 -0400
committer  Bjoern Brandenburg <bbb@mpi-sws.org>    2015-08-09 07:20:13 -0400
commit     13e68a73175041c83713fd1905c65fe612f73862 (patch)
tree       be3edeaac8b1b34714dd75d54d17f1ec88ecac35
parent     e82bc71470695e6a8a319962737b4abff85bf0c6 (diff)
Augment rt_task() with is_realtime()
Whenever the kernel checks rt_task() to avoid delaying real-time tasks, we also want it not to delay LITMUS^RT tasks. Hence, most calls to rt_task() should be paired with an equivalent call to is_realtime(). Notably, this affects the implementations of select() and nanosleep(), which apply timer_slack_ns when setting up timers for non-real-time tasks.
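The change applies the same pattern at every touched call site: wherever the stock kernel tests rt_task() (or dl_task()) to give real-time tasks special treatment, the condition is extended with is_realtime() so that LITMUS^RT tasks are covered as well. A minimal sketch of the combined predicate follows; the helper name is hypothetical and is not introduced by this patch, which instead open-codes the disjunction at each call site.

#include <linux/sched.h>	/* rt_task(), struct task_struct */
#include <litmus/litmus.h>	/* is_realtime() */

/*
 * Illustration only: a task counts as "real-time" here if it is either
 * a stock Linux RT task (SCHED_FIFO/SCHED_RR) or a LITMUS^RT real-time
 * task.
 */
static inline bool is_any_realtime(struct task_struct *tsk)
{
	return rt_task(tsk) || is_realtime(tsk);
}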
-rw-r--r--  fs/select.c                  4
-rw-r--r--  kernel/locking/mutex.c       4
-rw-r--r--  kernel/locking/rwsem-xadd.c  4
-rw-r--r--  kernel/time/hrtimer.c        4
-rw-r--r--  mm/page-writeback.c          6
-rw-r--r--  mm/page_alloc.c              6
6 files changed, 21 insertions, 7 deletions
diff --git a/fs/select.c b/fs/select.c
index f684c750e08a..fd65d1b6aeb6 100644
--- a/fs/select.c
+++ b/fs/select.c
@@ -30,6 +30,8 @@
 #include <linux/freezer.h>
 #include <net/busy_poll.h>
 
+#include <litmus/litmus.h> /* for is_realtime() */
+
 #include <asm/uaccess.h>
 
 
@@ -79,7 +81,7 @@ long select_estimate_accuracy(struct timespec *tv)
 	 * Realtime tasks get a slack of 0 for obvious reasons.
 	 */
 
-	if (rt_task(current))
+	if (rt_task(current) || is_realtime(current))
 		return 0;
 
 	ktime_get_ts(&now);
diff --git a/kernel/locking/mutex.c b/kernel/locking/mutex.c
index 4cccea6b8934..a8546e29671e 100644
--- a/kernel/locking/mutex.c
+++ b/kernel/locking/mutex.c
@@ -27,6 +27,8 @@
 #include <linux/debug_locks.h>
 #include <linux/osq_lock.h>
 
+#include <litmus/litmus.h> /* for is_realtime() */
+
 /*
  * In the DEBUG case we are using the "NULL fastpath" for mutexes,
  * which forces all calls into the slowpath:
@@ -368,7 +370,7 @@ static bool mutex_optimistic_spin(struct mutex *lock,
 		 * we're an RT task that will live-lock because we won't let
 		 * the owner complete.
 		 */
-		if (!owner && (need_resched() || rt_task(task)))
+		if (!owner && (need_resched() || rt_task(task) || is_realtime(task)))
 			break;
 
 		/*
diff --git a/kernel/locking/rwsem-xadd.c b/kernel/locking/rwsem-xadd.c
index 3417d0172a5d..873ffca2321c 100644
--- a/kernel/locking/rwsem-xadd.c
+++ b/kernel/locking/rwsem-xadd.c
@@ -18,6 +18,8 @@
 
 #include "rwsem.h"
 
+#include <litmus/litmus.h> /* for is_realtime() */
+
 /*
  * Guide to the rw_semaphore's count field for common values.
  * (32-bit case illustrated, similar for 64-bit)
@@ -392,7 +394,7 @@ static bool rwsem_optimistic_spin(struct rw_semaphore *sem)
 		 * we're an RT task that will live-lock because we won't let
 		 * the owner complete.
 		 */
-		if (!owner && (need_resched() || rt_task(current)))
+		if (!owner && (need_resched() || rt_task(current) || is_realtime(current)))
 			break;
 
 		/*
diff --git a/kernel/time/hrtimer.c b/kernel/time/hrtimer.c
index 22f9156f19d2..d5a8e4db0bf9 100644
--- a/kernel/time/hrtimer.c
+++ b/kernel/time/hrtimer.c
@@ -58,6 +58,8 @@
 
 #include "tick-internal.h"
 
+#include <litmus/litmus.h> /* for is_realtime() */
+
 /*
  * The timer bases:
  *
@@ -1659,7 +1661,7 @@ long hrtimer_nanosleep(struct timespec *rqtp, struct timespec __user *rmtp,
 	unsigned long slack;
 
 	slack = current->timer_slack_ns;
-	if (dl_task(current) || rt_task(current))
+	if (dl_task(current) || rt_task(current) || is_realtime(current))
 		slack = 0;
 
 	hrtimer_init_on_stack(&t.timer, clockid, mode);
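The slack value that hrtimer_nanosleep() now forces to 0 for LITMUS^RT tasks is the ordinary per-task timer_slack_ns attribute (typically 50 µs for non-real-time tasks), which user space can inspect via prctl(). A small, patch-independent sketch for reference; with this change the reported value no longer affects the nanosleep() wake-ups of a LITMUS^RT task:

#include <stdio.h>
#include <sys/prctl.h>

int main(void)
{
	/* PR_GET_TIMERSLACK returns the calling thread's timer slack in ns. */
	long slack = prctl(PR_GET_TIMERSLACK, 0, 0, 0, 0);
	printf("timer_slack_ns = %ld\n", slack);
	return 0;
}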
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index eb59f7eea508..7e39ffceb566 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -41,6 +41,8 @@
 
 #include "internal.h"
 
+#include <litmus/litmus.h> /* for is_realtime() */
+
 /*
  * Sleep at most 200ms at a time in balance_dirty_pages().
  */
@@ -279,7 +281,7 @@ void global_dirty_limits(unsigned long *pbackground, unsigned long *pdirty)
 	if (background >= dirty)
 		background = dirty / 2;
 	tsk = current;
-	if (tsk->flags & PF_LESS_THROTTLE || rt_task(tsk)) {
+	if (tsk->flags & PF_LESS_THROTTLE || rt_task(tsk) || is_realtime(tsk)) {
 		background += background / 4;
 		dirty += dirty / 4;
 	}
@@ -307,7 +309,7 @@ static unsigned long zone_dirty_limit(struct zone *zone)
 	else
 		dirty = vm_dirty_ratio * zone_memory / 100;
 
-	if (tsk->flags & PF_LESS_THROTTLE || rt_task(tsk))
+	if (tsk->flags & PF_LESS_THROTTLE || rt_task(tsk) || is_realtime(tsk))
 		dirty += dirty / 4;
 
 	return dirty;
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index ebffa0e4a9c0..950c002bbb45 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -63,6 +63,9 @@
 #include <linux/page_owner.h>
 
 #include <asm/sections.h>
+
+#include <litmus/litmus.h> /* for is_realtime() */
+
 #include <asm/tlbflush.h>
 #include <asm/div64.h>
 #include "internal.h"
@@ -2617,7 +2620,8 @@ gfp_to_alloc_flags(gfp_t gfp_mask)
 		 * comment for __cpuset_node_allowed().
 		 */
 		alloc_flags &= ~ALLOC_CPUSET;
-	} else if (unlikely(rt_task(current)) && !in_interrupt())
+	} else if (unlikely(rt_task(current) || is_realtime(current))
+		   && !in_interrupt())
 		alloc_flags |= ALLOC_HARDER;
 
 	if (likely(!(gfp_mask & __GFP_NOMEMALLOC))) {