Diffstat (limited to 'include/linux/sched.h')
-rw-r--r--   include/linux/sched.h   59
1 file changed, 29 insertions(+), 30 deletions(-)
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 3992f50de614..9591907c4f79 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -272,19 +272,10 @@ extern int runqueue_is_locked(int cpu);
 
 extern cpumask_var_t nohz_cpu_mask;
 #if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ)
-extern int select_nohz_load_balancer(int cpu);
-extern int get_nohz_load_balancer(void);
-extern int nohz_ratelimit(int cpu);
+extern void select_nohz_load_balancer(int stop_tick);
+extern int get_nohz_timer_target(void);
 #else
-static inline int select_nohz_load_balancer(int cpu)
-{
-	return 0;
-}
-
-static inline int nohz_ratelimit(int cpu)
-{
-	return 0;
-}
+static inline void select_nohz_load_balancer(int stop_tick) { }
 #endif
 
 /*
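
As a hedged illustration (not part of this patch), a caller of the reworked
nohz interface might look like the sketch below. The function names prefixed
example_ and the surrounding idle path are hypothetical; only
select_nohz_load_balancer() and get_nohz_timer_target() come from the header.

#include <linux/sched.h>

/* Hypothetical idle-entry path showing the new calling convention. */
static void example_enter_nohz_idle(void)
{
	/* 1 = this CPU is stopping its periodic tick */
	select_nohz_load_balancer(1);

	/* ... architecture idle code would run here ... */

	/* 0 = the tick is running again, withdraw from nohz balancing */
	select_nohz_load_balancer(0);
}

/* Timer code can ask for a preferred (non-idle) CPU to queue a timer on. */
static int example_pick_timer_cpu(void)
{
	return get_nohz_timer_target();
}
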
@@ -801,7 +792,7 @@ enum cpu_idle_type {
 #define SD_POWERSAVINGS_BALANCE 0x0100 /* Balance for power savings */
 #define SD_SHARE_PKG_RESOURCES 0x0200 /* Domain members share cpu pkg resources */
 #define SD_SERIALIZE 0x0400 /* Only a single load balancing instance */
-
+#define SD_ASYM_PACKING 0x0800 /* Place busy groups earlier in the domain */
 #define SD_PREFER_SIBLING 0x1000 /* Prefer to place tasks in a sibling domain */
 
 enum powersavings_balance_level {
@@ -836,6 +827,8 @@ static inline int sd_balance_for_package_power(void)
 	return SD_PREFER_SIBLING;
 }
 
+extern int __weak arch_sd_sibiling_asym_packing(void);
+
 /*
  * Optimise SD flags for power savings:
  * SD_BALANCE_NEWIDLE helps agressive task consolidation and power savings.
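
A sketch (under stated assumptions, not real arch code) of how an
architecture might override the new weak hook: the weak default returns 0,
and an arch that wants busy SMT groups packed toward lower-numbered CPUs can
return SD_ASYM_PACKING. The helper example_has_asymmetric_smt() is invented
for the example.

#include <linux/types.h>
#include <linux/sched.h>

/* Placeholder for an architecture-specific capability test. */
static bool example_has_asymmetric_smt(void)
{
	return false;	/* a real arch would probe its CPU features here */
}

/* Hypothetical arch override of the weak hook declared above. */
int arch_sd_sibiling_asym_packing(void)
{
	return example_has_asymmetric_smt() ? SD_ASYM_PACKING : 0;
}
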
@@ -857,7 +850,7 @@ struct sched_group {
 	 * CPU power of this group, SCHED_LOAD_SCALE being max power for a
 	 * single CPU.
 	 */
-	unsigned int cpu_power;
+	unsigned int cpu_power, cpu_power_orig;
 
 	/*
 	 * The CPUs this group covers.
@@ -1693,6 +1686,7 @@ extern void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t *
 #define PF_EXITING 0x00000004 /* getting shut down */
 #define PF_EXITPIDONE 0x00000008 /* pi exit done on shut down */
 #define PF_VCPU 0x00000010 /* I'm a virtual CPU */
+#define PF_WQ_WORKER 0x00000020 /* I'm a workqueue worker */
 #define PF_FORKNOEXEC 0x00000040 /* forked but didn't exec */
 #define PF_MCE_PROCESS 0x00000080 /* process policy on mce errors */
 #define PF_SUPERPRIV 0x00000100 /* used super-user privileges */
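
For illustration only (the helper name is invented), code that needs to
distinguish workqueue workers could test the new flag on the current task:

#include <linux/types.h>
#include <linux/sched.h>

/* Invented helper: true if the current task is a workqueue worker. */
static inline bool example_current_is_wq_worker(void)
{
	return (current->flags & PF_WQ_WORKER) != 0;
}
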
@@ -1787,20 +1781,23 @@ static inline int set_cpus_allowed(struct task_struct *p, cpumask_t new_mask)
 #endif
 
 /*
- * Architectures can set this to 1 if they have specified
- * CONFIG_HAVE_UNSTABLE_SCHED_CLOCK in their arch Kconfig,
- * but then during bootup it turns out that sched_clock()
- * is reliable after all:
+ * Do not use outside of architecture code which knows its limitations.
+ *
+ * sched_clock() has no promise of monotonicity or bounded drift between
+ * CPUs, use (which you should not) requires disabling IRQs.
+ *
+ * Please use one of the three interfaces below.
  */
-#ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
-extern int sched_clock_stable;
-#endif
-
-/* ftrace calls sched_clock() directly */
 extern unsigned long long notrace sched_clock(void);
+/*
+ * See the comment in kernel/sched_clock.c
+ */
+extern u64 cpu_clock(int cpu);
+extern u64 local_clock(void);
+extern u64 sched_clock_cpu(int cpu);
+
 
 extern void sched_clock_init(void);
-extern u64 sched_clock_cpu(int cpu);
 
 #ifndef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
 static inline void sched_clock_tick(void)
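
A hedged sketch of using the sanitized interfaces the new comment points at:
cpu_clock() and local_clock() wrap sched_clock() with the filtering done in
kernel/sched_clock.c, so they are intended to be callable without the IRQ
caveat that applies to raw sched_clock(). The measurement code below is only
an example; example_time_section() is an invented name.

#include <linux/kernel.h>
#include <linux/sched.h>

/* Example: nanosecond-resolution timing of a code section on one CPU. */
static void example_time_section(void)
{
	u64 t0, t1;

	t0 = local_clock();	/* ns timestamp, local CPU */
	/* ... the work being measured would go here ... */
	t1 = local_clock();

	pr_info("section took %llu ns\n",
		(unsigned long long)(t1 - t0));
}
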
@@ -1815,17 +1812,19 @@ static inline void sched_clock_idle_wakeup_event(u64 delta_ns)
 {
 }
 #else
+/*
+ * Architectures can set this to 1 if they have specified
+ * CONFIG_HAVE_UNSTABLE_SCHED_CLOCK in their arch Kconfig,
+ * but then during bootup it turns out that sched_clock()
+ * is reliable after all:
+ */
+extern int sched_clock_stable;
+
 extern void sched_clock_tick(void);
 extern void sched_clock_idle_sleep_event(void);
 extern void sched_clock_idle_wakeup_event(u64 delta_ns);
 #endif
 
-/*
- * For kernel-internal use: high-speed (but slightly incorrect) per-cpu
- * clock constructed from sched_clock():
- */
-extern unsigned long long cpu_clock(int cpu);
-
 extern unsigned long long
 task_sched_runtime(struct task_struct *task);
 extern unsigned long long thread_group_sched_runtime(struct task_struct *task);