From 13e68a73175041c83713fd1905c65fe612f73862 Mon Sep 17 00:00:00 2001
From: Bjoern Brandenburg <bbb@mpi-sws.org>
Date: Sun, 9 Aug 2015 13:18:50 +0200
Subject: Augment rt_task() with is_realtime()

Whenever the kernel checks for rt_task() to avoid delaying real-time
tasks, we want it to also not delay LITMUS^RT tasks. Hence, most
calls to rt_task() should be matched by an equivalent call to
is_realtime().

Notably, this affects the implementations of select() and nanosleep(),
which use timer_slack_ns when setting up timers for non-real-time
tasks.
---
 fs/select.c                 | 4 +++-
 kernel/locking/mutex.c      | 4 +++-
 kernel/locking/rwsem-xadd.c | 4 +++-
 kernel/time/hrtimer.c       | 4 +++-
 mm/page-writeback.c         | 6 ++++--
 mm/page_alloc.c             | 6 +++++-
 6 files changed, 21 insertions(+), 7 deletions(-)

diff --git a/fs/select.c b/fs/select.c
index f684c750e08a..fd65d1b6aeb6 100644
--- a/fs/select.c
+++ b/fs/select.c
@@ -30,6 +30,8 @@
 #include <net/busy_poll.h>
 #include <linux/vmalloc.h>
 
+#include <litmus/litmus.h> /* for is_realtime() */
+
 #include <asm/uaccess.h>
 
 
@@ -79,7 +81,7 @@ long select_estimate_accuracy(struct timespec *tv)
 	 * Realtime tasks get a slack of 0 for obvious reasons.
 	 */
 
-	if (rt_task(current))
+	if (rt_task(current) || is_realtime(current))
 		return 0;
 
 	ktime_get_ts(&now);
diff --git a/kernel/locking/mutex.c b/kernel/locking/mutex.c
index 4cccea6b8934..a8546e29671e 100644
--- a/kernel/locking/mutex.c
+++ b/kernel/locking/mutex.c
@@ -27,6 +27,8 @@
 #include <linux/debug_locks.h>
 #include <linux/osq_lock.h>
 
+#include <litmus/litmus.h> /* for is_realtime() */
+
 /*
  * In the DEBUG case we are using the "NULL fastpath" for mutexes,
  * which forces all calls into the slowpath:
@@ -368,7 +370,7 @@ static bool mutex_optimistic_spin(struct mutex *lock,
 		 * we're an RT task that will live-lock because we won't let
 		 * the owner complete.
 		 */
-		if (!owner && (need_resched() || rt_task(task)))
+		if (!owner && (need_resched() || rt_task(task) || is_realtime(task)))
 			break;
 
 		/*
diff --git a/kernel/locking/rwsem-xadd.c b/kernel/locking/rwsem-xadd.c
index 3417d0172a5d..873ffca2321c 100644
--- a/kernel/locking/rwsem-xadd.c
+++ b/kernel/locking/rwsem-xadd.c
@@ -18,6 +18,8 @@
 
 #include "rwsem.h"
 
+#include <litmus/litmus.h> /* for is_realtime() */
+
 /*
  * Guide to the rw_semaphore's count field for common values.
  * (32-bit case illustrated, similar for 64-bit)
@@ -392,7 +394,7 @@ static bool rwsem_optimistic_spin(struct rw_semaphore *sem)
 		 * we're an RT task that will live-lock because we won't let
 		 * the owner complete.
 		 */
-		if (!owner && (need_resched() || rt_task(current)))
+		if (!owner && (need_resched() || rt_task(current) || is_realtime(current)))
 			break;
 
 		/*
diff --git a/kernel/time/hrtimer.c b/kernel/time/hrtimer.c
index 22f9156f19d2..d5a8e4db0bf9 100644
--- a/kernel/time/hrtimer.c
+++ b/kernel/time/hrtimer.c
@@ -58,6 +58,8 @@
 
 #include "tick-internal.h"
 
+#include <litmus/litmus.h> /* for is_realtime() */
+
 /*
  * The timer bases:
  *
@@ -1659,7 +1661,7 @@ long hrtimer_nanosleep(struct timespec *rqtp, struct timespec __user *rmtp,
 	unsigned long slack;
 
 	slack = current->timer_slack_ns;
-	if (dl_task(current) || rt_task(current))
+	if (dl_task(current) || rt_task(current) || is_realtime(current))
 		slack = 0;
 
 	hrtimer_init_on_stack(&t.timer, clockid, mode);
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index eb59f7eea508..7e39ffceb566 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -41,6 +41,8 @@
 
 #include "internal.h"
 
+#include <litmus/litmus.h> /* for is_realtime() */
+
 /*
  * Sleep at most 200ms at a time in balance_dirty_pages().
  */
@@ -279,7 +281,7 @@ void global_dirty_limits(unsigned long *pbackground, unsigned long *pdirty)
 	if (background >= dirty)
 		background = dirty / 2;
 	tsk = current;
-	if (tsk->flags & PF_LESS_THROTTLE || rt_task(tsk)) {
+	if (tsk->flags & PF_LESS_THROTTLE || rt_task(tsk) || is_realtime(tsk)) {
 		background += background / 4;
 		dirty += dirty / 4;
 	}
@@ -307,7 +309,7 @@ static unsigned long zone_dirty_limit(struct zone *zone)
 	else
 		dirty = vm_dirty_ratio * zone_memory / 100;
 
-	if (tsk->flags & PF_LESS_THROTTLE || rt_task(tsk))
+	if (tsk->flags & PF_LESS_THROTTLE || rt_task(tsk) || is_realtime(tsk))
 		dirty += dirty / 4;
 
 	return dirty;
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index ebffa0e4a9c0..950c002bbb45 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -63,6 +63,9 @@
 #include <linux/sched/rt.h>
 #include <linux/page_owner.h>
+
+#include <litmus/litmus.h> /* for is_realtime() */
+
 #include <asm/sections.h>
 #include <asm/tlbflush.h>
 #include <asm/div64.h>
 #include "internal.h"
@@ -2617,7 +2620,8 @@ gfp_to_alloc_flags(gfp_t gfp_mask)
 		 * comment for __cpuset_node_allowed().
 		 */
 		alloc_flags &= ~ALLOC_CPUSET;
-	} else if (unlikely(rt_task(current)) && !in_interrupt())
+	} else if (unlikely(rt_task(current) || is_realtime(current))
+		   && !in_interrupt())
 		alloc_flags |= ALLOC_HARDER;
 
 	if (likely(!(gfp_mask & __GFP_NOMEMALLOC))) {
-- 
cgit v1.2.2