Diffstat (limited to 'include/linux/sched')
-rw-r--r--	include/linux/sched/rt.h	64
-rw-r--r--	include/linux/sched/sysctl.h	104
2 files changed, 168 insertions(+), 0 deletions(-)
diff --git a/include/linux/sched/rt.h b/include/linux/sched/rt.h
new file mode 100644
index 000000000000..440434df3627
--- /dev/null
+++ b/include/linux/sched/rt.h
@@ -0,0 +1,64 @@
+#ifndef _SCHED_RT_H
+#define _SCHED_RT_H
+
+/*
+ * Priority of a process goes from 0..MAX_PRIO-1, valid RT
+ * priority is 0..MAX_RT_PRIO-1, and SCHED_NORMAL/SCHED_BATCH
+ * tasks are in the range MAX_RT_PRIO..MAX_PRIO-1. Priority
+ * values are inverted: lower p->prio value means higher priority.
+ *
+ * The MAX_USER_RT_PRIO value allows the actual maximum
+ * RT priority to be separate from the value exported to
+ * user-space. This allows kernel threads to set their
+ * priority to a value higher than any user task. Note:
+ * MAX_RT_PRIO must not be smaller than MAX_USER_RT_PRIO.
+ */
+
+#define MAX_USER_RT_PRIO	100
+#define MAX_RT_PRIO		MAX_USER_RT_PRIO
+
+#define MAX_PRIO		(MAX_RT_PRIO + 40)
+#define DEFAULT_PRIO		(MAX_RT_PRIO + 20)
+
+static inline int rt_prio(int prio)
+{
+	if (unlikely(prio < MAX_RT_PRIO))
+		return 1;
+	return 0;
+}
+
+static inline int rt_task(struct task_struct *p)
+{
+	return rt_prio(p->prio);
+}
+
+#ifdef CONFIG_RT_MUTEXES
+extern int rt_mutex_getprio(struct task_struct *p);
+extern void rt_mutex_setprio(struct task_struct *p, int prio);
+extern void rt_mutex_adjust_pi(struct task_struct *p);
+static inline bool tsk_is_pi_blocked(struct task_struct *tsk)
+{
+	return tsk->pi_blocked_on != NULL;
+}
+#else
+static inline int rt_mutex_getprio(struct task_struct *p)
+{
+	return p->normal_prio;
+}
+# define rt_mutex_adjust_pi(p)		do { } while (0)
+static inline bool tsk_is_pi_blocked(struct task_struct *tsk)
+{
+	return false;
+}
+#endif
+
+extern void normalize_rt_tasks(void);
+
+
+/*
+ * default timeslice is 100 msecs (used only for SCHED_RR tasks).
+ * Timeslices get refilled after they expire.
+ */
+#define RR_TIMESLICE		(100 * HZ / 1000)
+
+#endif /* _SCHED_RT_H */
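
A minimal usage sketch for the helpers above (describe_prio() is hypothetical, not part of this patch): with MAX_RT_PRIO == 100, any task whose p->prio is below 100 is real-time, so rt_task() separates SCHED_FIFO/SCHED_RR tasks from SCHED_NORMAL/SCHED_BATCH ones, whose prio lands in 100..139 around DEFAULT_PRIO (120).

/* Sketch, assuming only the definitions from rt.h above. */
#include <linux/sched.h>
#include <linux/sched/rt.h>

static void describe_prio(struct task_struct *p)
{
	if (rt_task(p))		/* p->prio in 0..MAX_RT_PRIO-1, i.e. 0..99 */
		pr_info("%s: RT task, prio=%d\n", p->comm, p->prio);
	else			/* p->prio in MAX_RT_PRIO..MAX_PRIO-1, i.e. 100..139 */
		pr_info("%s: normal task, prio=%d\n", p->comm, p->prio);
}

Note that RR_TIMESLICE is expressed in jiffies: (100 * HZ / 1000) is 100 ms worth of ticks, e.g. 100 jiffies with HZ=1000 or 25 with HZ=250.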
diff --git a/include/linux/sched/sysctl.h b/include/linux/sched/sysctl.h
new file mode 100644
index 000000000000..bf8086b2506e
--- /dev/null
+++ b/include/linux/sched/sysctl.h
@@ -0,0 +1,104 @@
+#ifndef _SCHED_SYSCTL_H
+#define _SCHED_SYSCTL_H
+
+#ifdef CONFIG_DETECT_HUNG_TASK
+extern unsigned int  sysctl_hung_task_panic;
+extern unsigned long sysctl_hung_task_check_count;
+extern unsigned long sysctl_hung_task_timeout_secs;
+extern unsigned long sysctl_hung_task_warnings;
+extern int proc_dohung_task_timeout_secs(struct ctl_table *table, int write,
+					 void __user *buffer,
+					 size_t *lenp, loff_t *ppos);
+#else
+/* Avoid need for ifdefs elsewhere in the code */
+enum { sysctl_hung_task_timeout_secs = 0 };
+#endif
+
+/*
+ * Default maximum number of active map areas; this limits the number of
+ * vmas per mm struct. Users can overwrite this number via sysctl, but
+ * there is a catch.
+ *
+ * When a program's coredump is generated in ELF format, one section is
+ * created per vma. In ELF, the number of sections is stored as an unsigned
+ * short, so the section count must stay below 65535 at coredump time.
+ * Because the kernel adds some informative sections to the program image
+ * when generating the coredump, we need some margin. The number of extra
+ * sections is currently 1-3, depending on the arch; we use 5 as a safe margin.
+ */
+#define MAPCOUNT_ELF_CORE_MARGIN	(5)
+#define DEFAULT_MAX_MAP_COUNT	(USHRT_MAX - MAPCOUNT_ELF_CORE_MARGIN)
+
+extern int sysctl_max_map_count;
+
+extern unsigned int sysctl_sched_latency;
+extern unsigned int sysctl_sched_min_granularity;
+extern unsigned int sysctl_sched_wakeup_granularity;
+extern unsigned int sysctl_sched_child_runs_first;
+
+enum sched_tunable_scaling {
+	SCHED_TUNABLESCALING_NONE,
+	SCHED_TUNABLESCALING_LOG,
+	SCHED_TUNABLESCALING_LINEAR,
+	SCHED_TUNABLESCALING_END,
+};
+extern enum sched_tunable_scaling sysctl_sched_tunable_scaling;
+
+extern unsigned int sysctl_numa_balancing_scan_delay;
+extern unsigned int sysctl_numa_balancing_scan_period_min;
+extern unsigned int sysctl_numa_balancing_scan_period_max;
+extern unsigned int sysctl_numa_balancing_scan_period_reset;
+extern unsigned int sysctl_numa_balancing_scan_size;
+extern unsigned int sysctl_numa_balancing_settle_count;
+
+#ifdef CONFIG_SCHED_DEBUG
+extern unsigned int sysctl_sched_migration_cost;
+extern unsigned int sysctl_sched_nr_migrate;
+extern unsigned int sysctl_sched_time_avg;
+extern unsigned int sysctl_timer_migration;
+extern unsigned int sysctl_sched_shares_window;
+
+int sched_proc_update_handler(struct ctl_table *table, int write,
+			      void __user *buffer, size_t *length,
+			      loff_t *ppos);
+#endif
+#ifdef CONFIG_SCHED_DEBUG
+static inline unsigned int get_sysctl_timer_migration(void)
+{
+	return sysctl_timer_migration;
+}
+#else
+static inline unsigned int get_sysctl_timer_migration(void)
+{
+	return 1;
+}
+#endif
+
+/*
+ * control realtime throttling:
+ *
+ * /proc/sys/kernel/sched_rt_period_us
+ * /proc/sys/kernel/sched_rt_runtime_us
+ */
+extern unsigned int sysctl_sched_rt_period;
+extern int sysctl_sched_rt_runtime;
+
+#ifdef CONFIG_CFS_BANDWIDTH
+extern unsigned int sysctl_sched_cfs_bandwidth_slice;
+#endif
+
+#ifdef CONFIG_SCHED_AUTOGROUP
+extern unsigned int sysctl_sched_autogroup_enabled;
+#endif
+
+extern int sched_rr_timeslice;
+
+extern int sched_rr_handler(struct ctl_table *table, int write,
+			    void __user *buffer, size_t *lenp,
+			    loff_t *ppos);
+
+extern int sched_rt_handler(struct ctl_table *table, int write,
+			    void __user *buffer, size_t *lenp,
+			    loff_t *ppos);
+
+#endif /* _SCHED_SYSCTL_H */
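
Working through the arithmetic behind DEFAULT_MAX_MAP_COUNT above: USHRT_MAX is 65535, so the default cap is 65535 - 5 = 65530 vmas, leaving MAPCOUNT_ELF_CORE_MARGIN sections of headroom for the 1-3 extra sections a coredump adds. A small sketch of the headroom check the comment implies (can_coredump_sections() is illustrative, not part of this patch):

/* Sketch, assuming only the definitions from sysctl.h above. */
#include <linux/kernel.h>	/* USHRT_MAX */
#include <linux/sched/sysctl.h>

static bool can_coredump_sections(int nr_vmas, int extra_sections)
{
	/* With nr_vmas capped at DEFAULT_MAX_MAP_COUNT (65530), even the
	 * arch-dependent 1-3 extra sections keep the total below 65535. */
	return nr_vmas + extra_sections <= USHRT_MAX;
}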
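
The two /proc paths named in the throttling comment can be inspected from user-space. A hedged sketch; the usual defaults (period 1000000 us, runtime 950000 us, i.e. a 95% cap) are set elsewhere in the scheduler and are an assumption here, not something this header guarantees:

/* Userspace sketch: read the RT-throttling knobs named in sysctl.h. */
#include <stdio.h>

int main(void)
{
	long period_us = 0, runtime_us = 0;
	FILE *f;

	if ((f = fopen("/proc/sys/kernel/sched_rt_period_us", "r"))) {
		fscanf(f, "%ld", &period_us);
		fclose(f);
	}
	if ((f = fopen("/proc/sys/kernel/sched_rt_runtime_us", "r"))) {
		fscanf(f, "%ld", &runtime_us);
		fclose(f);
	}

	if (runtime_us < 0)	/* -1 disables RT throttling */
		printf("RT throttling disabled\n");
	else if (period_us > 0)
		printf("RT tasks capped at %.1f%% of each %ld us period\n",
		       100.0 * runtime_us / period_us, period_us);
	return 0;
}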