Diffstat (limited to 'include/linux/sched.h')
 include/linux/sched.h | 90 ++++++++++++++++++++++++++++++++++++++----------------------------------------------------------
 1 file changed, 38 insertions(+), 52 deletions(-)
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 0478888c6899..1e2a6db2d7dd 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -272,19 +272,10 @@ extern int runqueue_is_locked(int cpu);
 
 extern cpumask_var_t nohz_cpu_mask;
 #if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ)
-extern int select_nohz_load_balancer(int cpu);
-extern int get_nohz_load_balancer(void);
-extern int nohz_ratelimit(int cpu);
+extern void select_nohz_load_balancer(int stop_tick);
+extern int get_nohz_timer_target(void);
 #else
-static inline int select_nohz_load_balancer(int cpu)
-{
-	return 0;
-}
-
-static inline int nohz_ratelimit(int cpu)
-{
-	return 0;
-}
+static inline void select_nohz_load_balancer(int stop_tick) { }
 #endif
 
 /*
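The nohz hand-off is now a void notification keyed on whether the tick is being stopped, rather than a call that returns a balancer CPU. A minimal sketch of a caller in the tick-stop path, under the new signature (the surrounding helper is hypothetical; only select_nohz_load_balancer() comes from this header):

    #include <linux/sched.h>

    /* Hypothetical helper: called when a CPU is about to stop (or
     * restart) its periodic tick under CONFIG_NO_HZ. */
    static void nohz_tick_transition(bool stopping)
    {
    	/*
    	 * New convention: pass 1 when the tick is being stopped so
    	 * idle load balancing can be handed off, 0 when the tick is
    	 * restarting. There is no return value to check any more.
    	 */
    	select_nohz_load_balancer(stopping ? 1 : 0);
    }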
@@ -316,20 +307,16 @@ extern void scheduler_tick(void);
 
 extern void sched_show_task(struct task_struct *p);
 
-#ifdef CONFIG_DETECT_SOFTLOCKUP
-extern void softlockup_tick(void);
+#ifdef CONFIG_LOCKUP_DETECTOR
 extern void touch_softlockup_watchdog(void);
 extern void touch_softlockup_watchdog_sync(void);
 extern void touch_all_softlockup_watchdogs(void);
-extern int proc_dosoftlockup_thresh(struct ctl_table *table, int write,
+extern int proc_dowatchdog_thresh(struct ctl_table *table, int write,
 				    void __user *buffer,
 				    size_t *lenp, loff_t *ppos);
 extern unsigned int softlockup_panic;
 extern int softlockup_thresh;
 #else
-static inline void softlockup_tick(void)
-{
-}
 static inline void touch_softlockup_watchdog(void)
 {
 }
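These declarations now live behind CONFIG_LOCKUP_DETECTOR instead of CONFIG_DETECT_SOFTLOCKUP, and the per-tick softlockup_tick() hook is gone entirely. Code that legitimately holds a CPU for a long time still uses the same touch interfaces; a hedged sketch (the polling loop and status register are hypothetical):

    #include <linux/sched.h>
    #include <linux/io.h>

    /* Hypothetical long-running, non-preemptible firmware poll. */
    static void poll_firmware_done(void __iomem *status_reg)
    {
    	while (!(readl(status_reg) & 0x1)) {
    		/* Tell the lockup detector this stall is expected. */
    		touch_softlockup_watchdog();
    		cpu_relax();
    	}
    }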
@@ -634,7 +621,8 @@ struct signal_struct {
 	struct tty_audit_buf *tty_audit_buf;
 #endif
 
 	int oom_adj;		/* OOM kill score adjustment (bit shift) */
+	int oom_score_adj;	/* OOM kill score adjustment */
 };
 
 /* Context switch must be unlocked if interrupts are to be enabled */
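signal_struct now carries both the legacy bit-shift knob (oom_adj) and the new linear one (oom_score_adj, exposed via /proc/<pid>/oom_score_adj). A hedged kernel-side sketch of consulting the new field; the helper and the -500 cutoff are hypothetical, only the field itself comes from this patch:

    #include <linux/sched.h>

    /* Hypothetical policy check: strongly-protected tasks get special
     * treatment. oom_score_adj is a linear adjustment; very negative
     * values mean "avoid killing this task". */
    static bool task_oom_protected(struct task_struct *p)
    {
    	return p->signal->oom_score_adj < -500;
    }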
@@ -805,7 +793,7 @@ enum cpu_idle_type {
 #define SD_POWERSAVINGS_BALANCE	0x0100	/* Balance for power savings */
 #define SD_SHARE_PKG_RESOURCES	0x0200	/* Domain members share cpu pkg resources */
 #define SD_SERIALIZE		0x0400	/* Only a single load balancing instance */
-
+#define SD_ASYM_PACKING		0x0800	/* Place busy groups earlier in the domain */
 #define SD_PREFER_SIBLING	0x1000	/* Prefer to place tasks in a sibling domain */
 
 enum powersavings_balance_level {
@@ -840,6 +828,8 @@ static inline int sd_balance_for_package_power(void)
 	return SD_PREFER_SIBLING;
 }
 
+extern int __weak arch_sd_sibiling_asym_packing(void);
+
 /*
  * Optimise SD flags for power savings:
  * SD_BALANCE_NEWIDLE helps agressive task consolidation and power savings.
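Together with SD_ASYM_PACKING from the previous hunk, arch_sd_sibiling_asym_packing() (the misspelling is in the kernel source) is the weak hook an architecture overrides to ask the balancer to pack busy groups onto lower-numbered SMT siblings. A sketch of such an override, modelled on what an SMT-sensitive architecture might do; the feature test is hypothetical:

    #include <linux/sched.h>

    /* Arch override of the __weak default: return SD_ASYM_PACKING to
     * enable asymmetric SMT packing when the hardware benefits. */
    int arch_sd_sibiling_asym_packing(void)
    {
    	if (cpu_has_asym_smt())	/* hypothetical arch feature test */
    		return SD_ASYM_PACKING;
    	return 0;
    }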
@@ -861,7 +851,7 @@ struct sched_group {
 	 * CPU power of this group, SCHED_LOAD_SCALE being max power for a
 	 * single CPU.
 	 */
-	unsigned int cpu_power;
+	unsigned int cpu_power, cpu_power_orig;
 
 	/*
 	 * The CPUs this group covers.
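cpu_power_orig keeps the group's capacity before runtime scaling, so consumers can compare the scaled and unscaled values. A hedged sketch of the kind of ratio check this enables; the helper and the 3/4 threshold are hypothetical:

    #include <linux/sched.h>

    /* Hypothetical check: has this group's effective capacity dropped
     * well below its unscaled baseline? cpu_power is the scaled value,
     * cpu_power_orig the original. */
    static bool group_capacity_reduced(struct sched_group *sg)
    {
    	return sg->cpu_power * 4 < sg->cpu_power_orig * 3;
    }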
@@ -1697,6 +1687,7 @@ extern void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t *
 #define PF_EXITING	0x00000004	/* getting shut down */
 #define PF_EXITPIDONE	0x00000008	/* pi exit done on shut down */
 #define PF_VCPU		0x00000010	/* I'm a virtual CPU */
+#define PF_WQ_WORKER	0x00000020	/* I'm a workqueue worker */
 #define PF_FORKNOEXEC	0x00000040	/* forked but didn't exec */
 #define PF_MCE_PROCESS	0x00000080	/* process policy on mce errors */
 #define PF_SUPERPRIV	0x00000100	/* used super-user privileges */
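PF_WQ_WORKER marks kthreads that execute work items for the concurrency-managed workqueues. A minimal sketch of testing the flag (the helper itself is hypothetical):

    #include <linux/sched.h>

    /* Hypothetical debug helper: is @p a workqueue worker thread? */
    static inline bool is_wq_worker(struct task_struct *p)
    {
    	return (p->flags & PF_WQ_WORKER) != 0;
    }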
@@ -1791,20 +1782,23 @@ static inline int set_cpus_allowed(struct task_struct *p, cpumask_t new_mask)
 #endif
 
 /*
- * Architectures can set this to 1 if they have specified
- * CONFIG_HAVE_UNSTABLE_SCHED_CLOCK in their arch Kconfig,
- * but then during bootup it turns out that sched_clock()
- * is reliable after all:
+ * Do not use outside of architecture code which knows its limitations.
+ *
+ * sched_clock() has no promise of monotonicity or bounded drift between
+ * CPUs, use (which you should not) requires disabling IRQs.
+ *
+ * Please use one of the three interfaces below.
  */
-#ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
-extern int sched_clock_stable;
-#endif
-
-/* ftrace calls sched_clock() directly */
 extern unsigned long long notrace sched_clock(void);
+/*
+ * See the comment in kernel/sched_clock.c
+ */
+extern u64 cpu_clock(int cpu);
+extern u64 local_clock(void);
+extern u64 sched_clock_cpu(int cpu);
+
 
 extern void sched_clock_init(void);
-extern u64 sched_clock_cpu(int cpu);
 
 #ifndef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
 static inline void sched_clock_tick(void)
@@ -1819,17 +1813,19 @@ static inline void sched_clock_idle_wakeup_event(u64 delta_ns)
 {
 }
 #else
+/*
+ * Architectures can set this to 1 if they have specified
+ * CONFIG_HAVE_UNSTABLE_SCHED_CLOCK in their arch Kconfig,
+ * but then during bootup it turns out that sched_clock()
+ * is reliable after all:
+ */
+extern int sched_clock_stable;
+
 extern void sched_clock_tick(void);
 extern void sched_clock_idle_sleep_event(void);
 extern void sched_clock_idle_wakeup_event(u64 delta_ns);
 #endif
 
-/*
- * For kernel-internal use: high-speed (but slightly incorrect) per-cpu
- * clock constructed from sched_clock():
- */
-extern unsigned long long cpu_clock(int cpu);
-
 extern unsigned long long
 task_sched_runtime(struct task_struct *task);
 extern unsigned long long thread_group_sched_runtime(struct task_struct *task);
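The two hunks above steer users away from raw sched_clock() toward three u64 interfaces: cpu_clock(cpu), local_clock() and sched_clock_cpu(cpu). A hedged sketch of timing a section with local_clock(), which does not require disabling IRQs; the traced workload is hypothetical:

    #include <linux/kernel.h>
    #include <linux/sched.h>

    extern void do_frobnicate(void);	/* hypothetical workload */

    /* Measure how long do_frobnicate() runs. local_clock() returns
     * nanoseconds and, unlike raw sched_clock(), may be called
     * without disabling IRQs. */
    static void timed_frobnicate(void)
    {
    	u64 t0 = local_clock();

    	do_frobnicate();

    	pr_debug("frobnicate took %llu ns\n",
    		 (unsigned long long)(local_clock() - t0));
    }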
@@ -2113,7 +2109,9 @@ extern void daemonize(const char *, ...);
 extern int allow_signal(int);
 extern int disallow_signal(int);
 
-extern int do_execve(char *, char __user * __user *, char __user * __user *, struct pt_regs *);
+extern int do_execve(const char *,
+		     const char __user * const __user *,
+		     const char __user * const __user *, struct pt_regs *);
 extern long do_fork(unsigned long, unsigned long, struct pt_regs *, unsigned long, int __user *, int __user *);
 struct task_struct *fork_idle(int);
 
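do_execve() gains const-qualified filename, argv and envp pointers, so in-kernel callers must pass const-correct types. A minimal pass-through sketch showing the types the new prototype requires (the wrapper itself is hypothetical):

    #include <linux/sched.h>

    /* Hypothetical in-kernel exec helper: the parameter types must
     * match the new const-qualified prototype or the compiler warns. */
    static int exec_helper(const char *path,
    		       const char __user *const __user *argv,
    		       const char __user *const __user *envp,
    		       struct pt_regs *regs)
    {
    	return do_execve(path, argv, envp, regs);
    }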
@@ -2435,18 +2433,6 @@ static inline void set_task_cpu(struct task_struct *p, unsigned int cpu)
 
 #endif /* CONFIG_SMP */
 
-#ifdef CONFIG_TRACING
-extern void
-__trace_special(void *__tr, void *__data,
-		unsigned long arg1, unsigned long arg2, unsigned long arg3);
-#else
-static inline void
-__trace_special(void *__tr, void *__data,
-		unsigned long arg1, unsigned long arg2, unsigned long arg3)
-{
-}
-#endif
-
 extern long sched_setaffinity(pid_t pid, const struct cpumask *new_mask);
 extern long sched_getaffinity(pid_t pid, struct cpumask *mask);
 
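With the __trace_special() hook removed outright, ad-hoc tracing of a few scalar values generally goes through trace_printk() instead; a hedged sketch (the debug site and its arguments are hypothetical):

    #include <linux/kernel.h>

    /* Hypothetical debug site: trace_printk() writes into the ftrace
     * ring buffer and covers the use case of the removed hook. */
    static void debug_point(unsigned long a, unsigned long b,
    			unsigned long c)
    {
    	trace_printk("args: %lu %lu %lu\n", a, b, c);
    }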