author		Clark Williams <williams@redhat.com>	2013-02-07 10:47:07 -0500
committer	Ingo Molnar <mingo@kernel.org>	2013-02-07 14:51:08 -0500
commit		8bd75c77b7c6a3954140dd2e20346aef3efe4a35 (patch)
tree		10e0d451a58aeb6c8f48b871a848276bf3a8a359 /include
parent		ce0dbbbb30aee6a835511d5be446462388ba9eee (diff)
sched/rt: Move rt specific bits into new header file
Move rt scheduler definitions out of include/linux/sched.h into
new file include/linux/sched/rt.h.

Signed-off-by: Clark Williams <williams@redhat.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Steven Rostedt <rostedt@goodmis.org>
Link: http://lkml.kernel.org/r/20130207094707.7b9f825f@riff.lan
Signed-off-by: Ingo Molnar <mingo@kernel.org>
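After this split, include/linux/sched.h no longer provides the RT priority helpers, so code that uses them must pull in the new header explicitly. A minimal hypothetical sketch of such a caller (report_rt_status() is a made-up name, not a kernel API):

#include <linux/printk.h>
#include <linux/sched.h>
#include <linux/sched/rt.h>	/* rt_task() and MAX_RT_PRIO now live here */

/* Hypothetical helper, illustration only: log whether a task is realtime. */
static void report_rt_status(struct task_struct *p)
{
	if (rt_task(p))		/* true while p->prio is in 0..MAX_RT_PRIO-1 */
		pr_info("%s runs at RT priority %d\n", p->comm, p->prio);
}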
Diffstat (limited to 'include')
-rw-r--r--	include/linux/sched.h	| 55
-rw-r--r--	include/linux/sched/rt.h	| 58
2 files changed, 60 insertions(+), 53 deletions(-)
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 8fc9b2710a80..33cc42130371 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1164,6 +1164,7 @@ struct sched_entity {
 	/* rq "owned" by this entity/group: */
 	struct cfs_rq *my_q;
 #endif
+
 /*
  * Load-tracking only depends on SMP, FAIR_GROUP_SCHED dependency below may be
  * removed when useful for applications beyond shares distribution (e.g.
@@ -1191,6 +1192,7 @@ struct sched_rt_entity {
 #endif
 };
 
+
 struct rcu_node;
 
 enum perf_event_task_context {
@@ -1596,37 +1598,6 @@ static inline void set_numabalancing_state(bool enabled)
 }
 #endif
 
-/*
- * Priority of a process goes from 0..MAX_PRIO-1, valid RT
- * priority is 0..MAX_RT_PRIO-1, and SCHED_NORMAL/SCHED_BATCH
- * tasks are in the range MAX_RT_PRIO..MAX_PRIO-1. Priority
- * values are inverted: lower p->prio value means higher priority.
- *
- * The MAX_USER_RT_PRIO value allows the actual maximum
- * RT priority to be separate from the value exported to
- * user-space. This allows kernel threads to set their
- * priority to a value higher than any user task. Note:
- * MAX_RT_PRIO must not be smaller than MAX_USER_RT_PRIO.
- */
-
-#define MAX_USER_RT_PRIO	100
-#define MAX_RT_PRIO		MAX_USER_RT_PRIO
-
-#define MAX_PRIO		(MAX_RT_PRIO + 40)
-#define DEFAULT_PRIO		(MAX_RT_PRIO + 20)
-
-static inline int rt_prio(int prio)
-{
-	if (unlikely(prio < MAX_RT_PRIO))
-		return 1;
-	return 0;
-}
-
-static inline int rt_task(struct task_struct *p)
-{
-	return rt_prio(p->prio);
-}
-
 static inline struct pid *task_pid(struct task_struct *task)
 {
 	return task->pids[PIDTYPE_PID].pid;
@@ -2054,26 +2025,6 @@ static inline void sched_autogroup_fork(struct signal_struct *sig) { }
 static inline void sched_autogroup_exit(struct signal_struct *sig) { }
 #endif
 
-#ifdef CONFIG_RT_MUTEXES
-extern int rt_mutex_getprio(struct task_struct *p);
-extern void rt_mutex_setprio(struct task_struct *p, int prio);
-extern void rt_mutex_adjust_pi(struct task_struct *p);
-static inline bool tsk_is_pi_blocked(struct task_struct *tsk)
-{
-	return tsk->pi_blocked_on != NULL;
-}
-#else
-static inline int rt_mutex_getprio(struct task_struct *p)
-{
-	return p->normal_prio;
-}
-# define rt_mutex_adjust_pi(p)		do { } while (0)
-static inline bool tsk_is_pi_blocked(struct task_struct *tsk)
-{
-	return false;
-}
-#endif
-
 extern bool yield_to(struct task_struct *p, bool preempt);
 extern void set_user_nice(struct task_struct *p, long nice);
 extern int task_prio(const struct task_struct *p);
@@ -2703,8 +2654,6 @@ static inline void set_task_cpu(struct task_struct *p, unsigned int cpu)
 extern long sched_setaffinity(pid_t pid, const struct cpumask *new_mask);
 extern long sched_getaffinity(pid_t pid, struct cpumask *mask);
 
-extern void normalize_rt_tasks(void);
-
 #ifdef CONFIG_CGROUP_SCHED
 
 extern struct task_group root_task_group;
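The CONFIG_RT_MUTEXES block removed above (and re-added verbatim in the new header below) is the scheduler-facing half of rt_mutex priority inheritance: rt_mutex_getprio() reports the priority a task should run at once pending PI boosts are counted, and the !CONFIG_RT_MUTEXES stubs collapse that to p->normal_prio. A hypothetical sketch of a consumer (pi_state_snapshot() is a made-up name, illustration only):

#include <linux/printk.h>
#include <linux/sched.h>
#include <linux/sched/rt.h>

/* Hypothetical debug helper: dump a task's PI-related priorities. */
static void pi_state_snapshot(struct task_struct *p)
{
	int want = rt_mutex_getprio(p);	/* desired prio incl. PI boost */

	/* lower numbers mean higher priority on this scale */
	pr_info("%s: prio=%d normal_prio=%d want=%d pi_blocked=%d\n",
		p->comm, p->prio, p->normal_prio, want,
		tsk_is_pi_blocked(p));
}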
diff --git a/include/linux/sched/rt.h b/include/linux/sched/rt.h
new file mode 100644
index 000000000000..94e19ea28fc3
--- /dev/null
+++ b/include/linux/sched/rt.h
@@ -0,0 +1,58 @@
+#ifndef _SCHED_RT_H
+#define _SCHED_RT_H
+
+/*
+ * Priority of a process goes from 0..MAX_PRIO-1, valid RT
+ * priority is 0..MAX_RT_PRIO-1, and SCHED_NORMAL/SCHED_BATCH
+ * tasks are in the range MAX_RT_PRIO..MAX_PRIO-1. Priority
+ * values are inverted: lower p->prio value means higher priority.
+ *
+ * The MAX_USER_RT_PRIO value allows the actual maximum
+ * RT priority to be separate from the value exported to
+ * user-space. This allows kernel threads to set their
+ * priority to a value higher than any user task. Note:
+ * MAX_RT_PRIO must not be smaller than MAX_USER_RT_PRIO.
+ */
+
+#define MAX_USER_RT_PRIO	100
+#define MAX_RT_PRIO		MAX_USER_RT_PRIO
+
+#define MAX_PRIO		(MAX_RT_PRIO + 40)
+#define DEFAULT_PRIO		(MAX_RT_PRIO + 20)
+
+static inline int rt_prio(int prio)
+{
+	if (unlikely(prio < MAX_RT_PRIO))
+		return 1;
+	return 0;
+}
+
+static inline int rt_task(struct task_struct *p)
+{
+	return rt_prio(p->prio);
+}
+
+#ifdef CONFIG_RT_MUTEXES
+extern int rt_mutex_getprio(struct task_struct *p);
+extern void rt_mutex_setprio(struct task_struct *p, int prio);
+extern void rt_mutex_adjust_pi(struct task_struct *p);
+static inline bool tsk_is_pi_blocked(struct task_struct *tsk)
+{
+	return tsk->pi_blocked_on != NULL;
+}
+#else
+static inline int rt_mutex_getprio(struct task_struct *p)
+{
+	return p->normal_prio;
+}
+# define rt_mutex_adjust_pi(p)		do { } while (0)
+static inline bool tsk_is_pi_blocked(struct task_struct *tsk)
+{
+	return false;
+}
+#endif
+
+extern void normalize_rt_tasks(void);
+
+
+#endif /* _SCHED_RT_H */
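For reference, the constants in the new header pin down the kernel's single, inverted priority scale: RT priorities occupy 0..99, SCHED_NORMAL/SCHED_BATCH tasks occupy 100..139, and nice 0 lands on DEFAULT_PRIO (120). A self-contained userspace sketch of that arithmetic (NICE_TO_PRIO mirrors the scheduler-internal macro of the same name; the program is illustrative, not kernel code):

#include <stdio.h>

#define MAX_USER_RT_PRIO	100
#define MAX_RT_PRIO		MAX_USER_RT_PRIO	/* 100 */
#define MAX_PRIO		(MAX_RT_PRIO + 40)	/* 140 */
#define DEFAULT_PRIO		(MAX_RT_PRIO + 20)	/* 120 */

/* nice -20..19 maps onto prio 100..139; nice 0 hits DEFAULT_PRIO */
#define NICE_TO_PRIO(nice)	(MAX_RT_PRIO + (nice) + 20)

int main(void)
{
	printf("RT prio range:      0..%d\n", MAX_RT_PRIO - 1);
	printf("normal prio range:  %d..%d\n", MAX_RT_PRIO, MAX_PRIO - 1);
	printf("nice -20 -> prio %d\n", NICE_TO_PRIO(-20));
	printf("nice   0 -> prio %d (DEFAULT_PRIO=%d)\n", NICE_TO_PRIO(0), DEFAULT_PRIO);
	printf("nice  19 -> prio %d\n", NICE_TO_PRIO(19));
	return 0;
}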