diff options
| author | Clark Williams <williams@redhat.com> | 2013-02-07 10:46:59 -0500 |
|---|---|---|
| committer | Ingo Molnar <mingo@kernel.org> | 2013-02-07 14:50:54 -0500 |
| commit | cf4aebc292fac7f34f8345664320e9d4a42ca76c (patch) | |
| tree | 6eceb9bb2d8382c4499366a8fee060688aad6107 /include/linux | |
| parent | b2c77a57e4a0a7877e357dead7ee8acc19944f3e (diff) | |
sched: Move sched.h sysctl bits into separate header
Move the sysctl-related bits from include/linux/sched.h into
a new file: include/linux/sched/sysctl.h. Then update source
files requiring access to those bits by including the new
header file.
Signed-off-by: Clark Williams <williams@redhat.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Steven Rostedt <rostedt@goodmis.org>
Link: http://lkml.kernel.org/r/20130207094659.06dced96@riff.lan
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'include/linux')
| -rw-r--r-- | include/linux/sched.h | 91 | ||||
| -rw-r--r-- | include/linux/sched/sysctl.h | 97 |
2 files changed, 97 insertions(+), 91 deletions(-)
diff --git a/include/linux/sched.h b/include/linux/sched.h index 719ee0815e3a..8fc9b2710a80 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h | |||
| @@ -304,19 +304,6 @@ static inline void lockup_detector_init(void) | |||
| 304 | } | 304 | } |
| 305 | #endif | 305 | #endif |
| 306 | 306 | ||
| 307 | #ifdef CONFIG_DETECT_HUNG_TASK | ||
| 308 | extern unsigned int sysctl_hung_task_panic; | ||
| 309 | extern unsigned long sysctl_hung_task_check_count; | ||
| 310 | extern unsigned long sysctl_hung_task_timeout_secs; | ||
| 311 | extern unsigned long sysctl_hung_task_warnings; | ||
| 312 | extern int proc_dohung_task_timeout_secs(struct ctl_table *table, int write, | ||
| 313 | void __user *buffer, | ||
| 314 | size_t *lenp, loff_t *ppos); | ||
| 315 | #else | ||
| 316 | /* Avoid need for ifdefs elsewhere in the code */ | ||
| 317 | enum { sysctl_hung_task_timeout_secs = 0 }; | ||
| 318 | #endif | ||
| 319 | |||
| 320 | /* Attach to any functions which should be ignored in wchan output. */ | 307 | /* Attach to any functions which should be ignored in wchan output. */ |
| 321 | #define __sched __attribute__((__section__(".sched.text"))) | 308 | #define __sched __attribute__((__section__(".sched.text"))) |
| 322 | 309 | ||
| @@ -338,23 +325,6 @@ extern int mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner); | |||
| 338 | struct nsproxy; | 325 | struct nsproxy; |
| 339 | struct user_namespace; | 326 | struct user_namespace; |
| 340 | 327 | ||
| 341 | /* | ||
| 342 | * Default maximum number of active map areas, this limits the number of vmas | ||
| 343 | * per mm struct. Users can overwrite this number by sysctl but there is a | ||
| 344 | * problem. | ||
| 345 | * | ||
| 346 | * When a program's coredump is generated as ELF format, a section is created | ||
| 347 | * per a vma. In ELF, the number of sections is represented in unsigned short. | ||
| 348 | * This means the number of sections should be smaller than 65535 at coredump. | ||
| 349 | * Because the kernel adds some informative sections to a image of program at | ||
| 350 | * generating coredump, we need some margin. The number of extra sections is | ||
| 351 | * 1-3 now and depends on arch. We use "5" as safe margin, here. | ||
| 352 | */ | ||
| 353 | #define MAPCOUNT_ELF_CORE_MARGIN (5) | ||
| 354 | #define DEFAULT_MAX_MAP_COUNT (USHRT_MAX - MAPCOUNT_ELF_CORE_MARGIN) | ||
| 355 | |||
| 356 | extern int sysctl_max_map_count; | ||
| 357 | |||
| 358 | #include <linux/aio.h> | 328 | #include <linux/aio.h> |
| 359 | 329 | ||
| 360 | #ifdef CONFIG_MMU | 330 | #ifdef CONFIG_MMU |
| @@ -1221,12 +1191,6 @@ struct sched_rt_entity { | |||
| 1221 | #endif | 1191 | #endif |
| 1222 | }; | 1192 | }; |
| 1223 | 1193 | ||
| 1224 | /* | ||
| 1225 | * default timeslice is 100 msecs (used only for SCHED_RR tasks). | ||
| 1226 | * Timeslices get refilled after they expire. | ||
| 1227 | */ | ||
| 1228 | #define RR_TIMESLICE (100 * HZ / 1000) | ||
| 1229 | |||
| 1230 | struct rcu_node; | 1194 | struct rcu_node; |
| 1231 | 1195 | ||
| 1232 | enum perf_event_task_context { | 1196 | enum perf_event_task_context { |
| @@ -2074,58 +2038,7 @@ extern void wake_up_idle_cpu(int cpu); | |||
| 2074 | static inline void wake_up_idle_cpu(int cpu) { } | 2038 | static inline void wake_up_idle_cpu(int cpu) { } |
| 2075 | #endif | 2039 | #endif |
| 2076 | 2040 | ||
| 2077 | extern unsigned int sysctl_sched_latency; | ||
| 2078 | extern unsigned int sysctl_sched_min_granularity; | ||
| 2079 | extern unsigned int sysctl_sched_wakeup_granularity; | ||
| 2080 | extern unsigned int sysctl_sched_child_runs_first; | ||
| 2081 | |||
| 2082 | enum sched_tunable_scaling { | ||
| 2083 | SCHED_TUNABLESCALING_NONE, | ||
| 2084 | SCHED_TUNABLESCALING_LOG, | ||
| 2085 | SCHED_TUNABLESCALING_LINEAR, | ||
| 2086 | SCHED_TUNABLESCALING_END, | ||
| 2087 | }; | ||
| 2088 | extern enum sched_tunable_scaling sysctl_sched_tunable_scaling; | ||
| 2089 | |||
| 2090 | extern unsigned int sysctl_numa_balancing_scan_delay; | ||
| 2091 | extern unsigned int sysctl_numa_balancing_scan_period_min; | ||
| 2092 | extern unsigned int sysctl_numa_balancing_scan_period_max; | ||
| 2093 | extern unsigned int sysctl_numa_balancing_scan_period_reset; | ||
| 2094 | extern unsigned int sysctl_numa_balancing_scan_size; | ||
| 2095 | extern unsigned int sysctl_numa_balancing_settle_count; | ||
| 2096 | |||
| 2097 | #ifdef CONFIG_SCHED_DEBUG | ||
| 2098 | extern unsigned int sysctl_sched_migration_cost; | ||
| 2099 | extern unsigned int sysctl_sched_nr_migrate; | ||
| 2100 | extern unsigned int sysctl_sched_time_avg; | ||
| 2101 | extern unsigned int sysctl_timer_migration; | ||
| 2102 | extern unsigned int sysctl_sched_shares_window; | ||
| 2103 | |||
| 2104 | int sched_proc_update_handler(struct ctl_table *table, int write, | ||
| 2105 | void __user *buffer, size_t *length, | ||
| 2106 | loff_t *ppos); | ||
| 2107 | #endif | ||
| 2108 | #ifdef CONFIG_SCHED_DEBUG | ||
| 2109 | static inline unsigned int get_sysctl_timer_migration(void) | ||
| 2110 | { | ||
| 2111 | return sysctl_timer_migration; | ||
| 2112 | } | ||
| 2113 | #else | ||
| 2114 | static inline unsigned int get_sysctl_timer_migration(void) | ||
| 2115 | { | ||
| 2116 | return 1; | ||
| 2117 | } | ||
| 2118 | #endif | ||
| 2119 | extern unsigned int sysctl_sched_rt_period; | ||
| 2120 | extern int sysctl_sched_rt_runtime; | ||
| 2121 | |||
| 2122 | int sched_rt_handler(struct ctl_table *table, int write, | ||
| 2123 | void __user *buffer, size_t *lenp, | ||
| 2124 | loff_t *ppos); | ||
| 2125 | |||
| 2126 | #ifdef CONFIG_SCHED_AUTOGROUP | 2041 | #ifdef CONFIG_SCHED_AUTOGROUP |
| 2127 | extern unsigned int sysctl_sched_autogroup_enabled; | ||
| 2128 | |||
| 2129 | extern void sched_autogroup_create_attach(struct task_struct *p); | 2042 | extern void sched_autogroup_create_attach(struct task_struct *p); |
| 2130 | extern void sched_autogroup_detach(struct task_struct *p); | 2043 | extern void sched_autogroup_detach(struct task_struct *p); |
| 2131 | extern void sched_autogroup_fork(struct signal_struct *sig); | 2044 | extern void sched_autogroup_fork(struct signal_struct *sig); |
| @@ -2141,10 +2054,6 @@ static inline void sched_autogroup_fork(struct signal_struct *sig) { } | |||
| 2141 | static inline void sched_autogroup_exit(struct signal_struct *sig) { } | 2054 | static inline void sched_autogroup_exit(struct signal_struct *sig) { } |
| 2142 | #endif | 2055 | #endif |
| 2143 | 2056 | ||
| 2144 | #ifdef CONFIG_CFS_BANDWIDTH | ||
| 2145 | extern unsigned int sysctl_sched_cfs_bandwidth_slice; | ||
| 2146 | #endif | ||
| 2147 | |||
| 2148 | #ifdef CONFIG_RT_MUTEXES | 2057 | #ifdef CONFIG_RT_MUTEXES |
| 2149 | extern int rt_mutex_getprio(struct task_struct *p); | 2058 | extern int rt_mutex_getprio(struct task_struct *p); |
| 2150 | extern void rt_mutex_setprio(struct task_struct *p, int prio); | 2059 | extern void rt_mutex_setprio(struct task_struct *p, int prio); |
diff --git a/include/linux/sched/sysctl.h b/include/linux/sched/sysctl.h new file mode 100644 index 000000000000..bac914e458ca --- /dev/null +++ b/include/linux/sched/sysctl.h | |||
| @@ -0,0 +1,97 @@ | |||
| 1 | #ifndef _SCHED_SYSCTL_H | ||
| 2 | #define _SCHED_SYSCTL_H | ||
| 3 | |||
| 4 | #ifdef CONFIG_DETECT_HUNG_TASK | ||
| 5 | extern unsigned int sysctl_hung_task_panic; | ||
| 6 | extern unsigned long sysctl_hung_task_check_count; | ||
| 7 | extern unsigned long sysctl_hung_task_timeout_secs; | ||
| 8 | extern unsigned long sysctl_hung_task_warnings; | ||
| 9 | extern int proc_dohung_task_timeout_secs(struct ctl_table *table, int write, | ||
| 10 | void __user *buffer, | ||
| 11 | size_t *lenp, loff_t *ppos); | ||
| 12 | #else | ||
| 13 | /* Avoid need for ifdefs elsewhere in the code */ | ||
| 14 | enum { sysctl_hung_task_timeout_secs = 0 }; | ||
| 15 | #endif | ||
| 16 | |||
| 17 | /* | ||
| 18 | * Default maximum number of active map areas, this limits the number of vmas | ||
| 19 | * per mm struct. Users can overwrite this number by sysctl but there is a | ||
| 20 | * problem. | ||
| 21 | * | ||
| 22 | * When a program's coredump is generated as ELF format, a section is created | ||
| 23 | * per a vma. In ELF, the number of sections is represented in unsigned short. | ||
| 24 | * This means the number of sections should be smaller than 65535 at coredump. | ||
| 25 | * Because the kernel adds some informative sections to a image of program at | ||
| 26 | * generating coredump, we need some margin. The number of extra sections is | ||
| 27 | * 1-3 now and depends on arch. We use "5" as safe margin, here. | ||
| 28 | */ | ||
| 29 | #define MAPCOUNT_ELF_CORE_MARGIN (5) | ||
| 30 | #define DEFAULT_MAX_MAP_COUNT (USHRT_MAX - MAPCOUNT_ELF_CORE_MARGIN) | ||
| 31 | |||
| 32 | extern int sysctl_max_map_count; | ||
| 33 | |||
| 34 | extern unsigned int sysctl_sched_latency; | ||
| 35 | extern unsigned int sysctl_sched_min_granularity; | ||
| 36 | extern unsigned int sysctl_sched_wakeup_granularity; | ||
| 37 | extern unsigned int sysctl_sched_child_runs_first; | ||
| 38 | |||
| 39 | enum sched_tunable_scaling { | ||
| 40 | SCHED_TUNABLESCALING_NONE, | ||
| 41 | SCHED_TUNABLESCALING_LOG, | ||
| 42 | SCHED_TUNABLESCALING_LINEAR, | ||
| 43 | SCHED_TUNABLESCALING_END, | ||
| 44 | }; | ||
| 45 | extern enum sched_tunable_scaling sysctl_sched_tunable_scaling; | ||
| 46 | |||
| 47 | extern unsigned int sysctl_numa_balancing_scan_delay; | ||
| 48 | extern unsigned int sysctl_numa_balancing_scan_period_min; | ||
| 49 | extern unsigned int sysctl_numa_balancing_scan_period_max; | ||
| 50 | extern unsigned int sysctl_numa_balancing_scan_period_reset; | ||
| 51 | extern unsigned int sysctl_numa_balancing_scan_size; | ||
| 52 | extern unsigned int sysctl_numa_balancing_settle_count; | ||
| 53 | |||
| 54 | #ifdef CONFIG_SCHED_DEBUG | ||
| 55 | extern unsigned int sysctl_sched_migration_cost; | ||
| 56 | extern unsigned int sysctl_sched_nr_migrate; | ||
| 57 | extern unsigned int sysctl_sched_time_avg; | ||
| 58 | extern unsigned int sysctl_timer_migration; | ||
| 59 | extern unsigned int sysctl_sched_shares_window; | ||
| 60 | |||
| 61 | int sched_proc_update_handler(struct ctl_table *table, int write, | ||
| 62 | void __user *buffer, size_t *length, | ||
| 63 | loff_t *ppos); | ||
| 64 | #endif | ||
| 65 | #ifdef CONFIG_SCHED_DEBUG | ||
| 66 | static inline unsigned int get_sysctl_timer_migration(void) | ||
| 67 | { | ||
| 68 | return sysctl_timer_migration; | ||
| 69 | } | ||
| 70 | #else | ||
| 71 | static inline unsigned int get_sysctl_timer_migration(void) | ||
| 72 | { | ||
| 73 | return 1; | ||
| 74 | } | ||
| 75 | #endif | ||
| 76 | extern unsigned int sysctl_sched_rt_period; | ||
| 77 | extern int sysctl_sched_rt_runtime; | ||
| 78 | |||
| 79 | #ifdef CONFIG_CFS_BANDWIDTH | ||
| 80 | extern unsigned int sysctl_sched_cfs_bandwidth_slice; | ||
| 81 | #endif | ||
| 82 | |||
| 83 | #ifdef CONFIG_SCHED_AUTOGROUP | ||
| 84 | extern unsigned int sysctl_sched_autogroup_enabled; | ||
| 85 | #endif | ||
| 86 | |||
| 87 | /* | ||
| 88 | * default timeslice is 100 msecs (used only for SCHED_RR tasks). | ||
| 89 | * Timeslices get refilled after they expire. | ||
| 90 | */ | ||
| 91 | #define RR_TIMESLICE (100 * HZ / 1000) | ||
| 92 | |||
| 93 | int sched_rt_handler(struct ctl_table *table, int write, | ||
| 94 | void __user *buffer, size_t *lenp, | ||
| 95 | loff_t *ppos); | ||
| 96 | |||
| 97 | #endif /* _SCHED_SYSCTL_H */ | ||
