diff options
Diffstat (limited to 'include/linux/sched')
-rw-r--r-- | include/linux/sched/rt.h | 64 | ||||
-rw-r--r-- | include/linux/sched/sysctl.h | 104 |
2 files changed, 168 insertions, 0 deletions
diff --git a/include/linux/sched/rt.h b/include/linux/sched/rt.h new file mode 100644 index 000000000000..440434df3627 --- /dev/null +++ b/include/linux/sched/rt.h | |||
@@ -0,0 +1,64 @@ | |||
1 | #ifndef _SCHED_RT_H | ||
2 | #define _SCHED_RT_H | ||
3 | |||
4 | /* | ||
5 | * Priority of a process goes from 0..MAX_PRIO-1, valid RT | ||
6 | * priority is 0..MAX_RT_PRIO-1, and SCHED_NORMAL/SCHED_BATCH | ||
7 | * tasks are in the range MAX_RT_PRIO..MAX_PRIO-1. Priority | ||
8 | * values are inverted: lower p->prio value means higher priority. | ||
9 | * | ||
10 | * The MAX_USER_RT_PRIO value allows the actual maximum | ||
11 | * RT priority to be separate from the value exported to | ||
12 | * user-space. This allows kernel threads to set their | ||
13 | * priority to a value higher than any user task. Note: | ||
14 | * MAX_RT_PRIO must not be smaller than MAX_USER_RT_PRIO. | ||
15 | */ | ||
16 | |||
17 | #define MAX_USER_RT_PRIO 100 | ||
18 | #define MAX_RT_PRIO MAX_USER_RT_PRIO | ||
19 | |||
20 | #define MAX_PRIO (MAX_RT_PRIO + 40) | ||
21 | #define DEFAULT_PRIO (MAX_RT_PRIO + 20) | ||
22 | |||
23 | static inline int rt_prio(int prio) | ||
24 | { | ||
25 | if (unlikely(prio < MAX_RT_PRIO)) | ||
26 | return 1; | ||
27 | return 0; | ||
28 | } | ||
29 | |||
30 | static inline int rt_task(struct task_struct *p) | ||
31 | { | ||
32 | return rt_prio(p->prio); | ||
33 | } | ||
34 | |||
35 | #ifdef CONFIG_RT_MUTEXES | ||
36 | extern int rt_mutex_getprio(struct task_struct *p); | ||
37 | extern void rt_mutex_setprio(struct task_struct *p, int prio); | ||
38 | extern void rt_mutex_adjust_pi(struct task_struct *p); | ||
39 | static inline bool tsk_is_pi_blocked(struct task_struct *tsk) | ||
40 | { | ||
41 | return tsk->pi_blocked_on != NULL; | ||
42 | } | ||
43 | #else | ||
44 | static inline int rt_mutex_getprio(struct task_struct *p) | ||
45 | { | ||
46 | return p->normal_prio; | ||
47 | } | ||
48 | # define rt_mutex_adjust_pi(p) do { } while (0) | ||
49 | static inline bool tsk_is_pi_blocked(struct task_struct *tsk) | ||
50 | { | ||
51 | return false; | ||
52 | } | ||
53 | #endif | ||
54 | |||
55 | extern void normalize_rt_tasks(void); | ||
56 | |||
57 | |||
58 | /* | ||
59 | * default timeslice is 100 msecs (used only for SCHED_RR tasks). | ||
60 | * Timeslices get refilled after they expire. | ||
61 | */ | ||
62 | #define RR_TIMESLICE (100 * HZ / 1000) | ||
63 | |||
64 | #endif /* _SCHED_RT_H */ | ||
diff --git a/include/linux/sched/sysctl.h b/include/linux/sched/sysctl.h new file mode 100644 index 000000000000..bf8086b2506e --- /dev/null +++ b/include/linux/sched/sysctl.h | |||
@@ -0,0 +1,104 @@ | |||
1 | #ifndef _SCHED_SYSCTL_H | ||
2 | #define _SCHED_SYSCTL_H | ||
3 | |||
4 | #ifdef CONFIG_DETECT_HUNG_TASK | ||
5 | extern unsigned int sysctl_hung_task_panic; | ||
6 | extern unsigned long sysctl_hung_task_check_count; | ||
7 | extern unsigned long sysctl_hung_task_timeout_secs; | ||
8 | extern unsigned long sysctl_hung_task_warnings; | ||
9 | extern int proc_dohung_task_timeout_secs(struct ctl_table *table, int write, | ||
10 | void __user *buffer, | ||
11 | size_t *lenp, loff_t *ppos); | ||
12 | #else | ||
13 | /* Avoid need for ifdefs elsewhere in the code */ | ||
14 | enum { sysctl_hung_task_timeout_secs = 0 }; | ||
15 | #endif | ||
16 | |||
17 | /* | ||
18 | * Default maximum number of active map areas, this limits the number of vmas | ||
19 | * per mm struct. Users can overwrite this number by sysctl but there is a | ||
20 | * problem. | ||
21 | * | ||
22 | * When a program's coredump is generated as ELF format, a section is created | ||
23 | * per a vma. In ELF, the number of sections is represented in unsigned short. | ||
24 | * This means the number of sections should be smaller than 65535 at coredump. | ||
25 | * Because the kernel adds some informative sections to a image of program at | ||
26 | * generating coredump, we need some margin. The number of extra sections is | ||
27 | * 1-3 now and depends on arch. We use "5" as safe margin, here. | ||
28 | */ | ||
29 | #define MAPCOUNT_ELF_CORE_MARGIN (5) | ||
30 | #define DEFAULT_MAX_MAP_COUNT (USHRT_MAX - MAPCOUNT_ELF_CORE_MARGIN) | ||
31 | |||
32 | extern int sysctl_max_map_count; | ||
33 | |||
34 | extern unsigned int sysctl_sched_latency; | ||
35 | extern unsigned int sysctl_sched_min_granularity; | ||
36 | extern unsigned int sysctl_sched_wakeup_granularity; | ||
37 | extern unsigned int sysctl_sched_child_runs_first; | ||
38 | |||
39 | enum sched_tunable_scaling { | ||
40 | SCHED_TUNABLESCALING_NONE, | ||
41 | SCHED_TUNABLESCALING_LOG, | ||
42 | SCHED_TUNABLESCALING_LINEAR, | ||
43 | SCHED_TUNABLESCALING_END, | ||
44 | }; | ||
45 | extern enum sched_tunable_scaling sysctl_sched_tunable_scaling; | ||
46 | |||
47 | extern unsigned int sysctl_numa_balancing_scan_delay; | ||
48 | extern unsigned int sysctl_numa_balancing_scan_period_min; | ||
49 | extern unsigned int sysctl_numa_balancing_scan_period_max; | ||
50 | extern unsigned int sysctl_numa_balancing_scan_period_reset; | ||
51 | extern unsigned int sysctl_numa_balancing_scan_size; | ||
52 | extern unsigned int sysctl_numa_balancing_settle_count; | ||
53 | |||
54 | #ifdef CONFIG_SCHED_DEBUG | ||
55 | extern unsigned int sysctl_sched_migration_cost; | ||
56 | extern unsigned int sysctl_sched_nr_migrate; | ||
57 | extern unsigned int sysctl_sched_time_avg; | ||
58 | extern unsigned int sysctl_timer_migration; | ||
59 | extern unsigned int sysctl_sched_shares_window; | ||
60 | |||
61 | int sched_proc_update_handler(struct ctl_table *table, int write, | ||
62 | void __user *buffer, size_t *length, | ||
63 | loff_t *ppos); | ||
64 | #endif | ||
65 | #ifdef CONFIG_SCHED_DEBUG | ||
66 | static inline unsigned int get_sysctl_timer_migration(void) | ||
67 | { | ||
68 | return sysctl_timer_migration; | ||
69 | } | ||
70 | #else | ||
/*
 * get_sysctl_timer_migration - !CONFIG_SCHED_DEBUG variant.
 *
 * Without SCHED_DEBUG there is no runtime knob; hard-code the value 1
 * (presumably "migration enabled" — the default when the sysctl exists;
 * confirm against kernel/sysctl.c).
 */
static inline unsigned int get_sysctl_timer_migration(void)
{
	return 1;
}
75 | #endif | ||
76 | |||
77 | /* | ||
78 | * control realtime throttling: | ||
79 | * | ||
80 | * /proc/sys/kernel/sched_rt_period_us | ||
81 | * /proc/sys/kernel/sched_rt_runtime_us | ||
82 | */ | ||
83 | extern unsigned int sysctl_sched_rt_period; | ||
84 | extern int sysctl_sched_rt_runtime; | ||
85 | |||
86 | #ifdef CONFIG_CFS_BANDWIDTH | ||
87 | extern unsigned int sysctl_sched_cfs_bandwidth_slice; | ||
88 | #endif | ||
89 | |||
90 | #ifdef CONFIG_SCHED_AUTOGROUP | ||
91 | extern unsigned int sysctl_sched_autogroup_enabled; | ||
92 | #endif | ||
93 | |||
94 | extern int sched_rr_timeslice; | ||
95 | |||
96 | extern int sched_rr_handler(struct ctl_table *table, int write, | ||
97 | void __user *buffer, size_t *lenp, | ||
98 | loff_t *ppos); | ||
99 | |||
100 | extern int sched_rt_handler(struct ctl_table *table, int write, | ||
101 | void __user *buffer, size_t *lenp, | ||
102 | loff_t *ppos); | ||
103 | |||
104 | #endif /* _SCHED_SYSCTL_H */ | ||