Diffstat (limited to 'include')
-rw-r--r--  include/linux/cpu.h        | 25 +++++++++++
-rw-r--r--  include/linux/cpuset.h     |  6 +++
-rw-r--r--  include/linux/perf_event.h |  2 +-
-rw-r--r--  include/linux/sched.h      | 53 +++++++++++++----------
-rw-r--r--  include/linux/topology.h   |  1 +

5 files changed, 62 insertions(+), 25 deletions(-)
diff --git a/include/linux/cpu.h b/include/linux/cpu.h
index e287863ac053..de6b1722cdca 100644
--- a/include/linux/cpu.h
+++ b/include/linux/cpu.h
@@ -48,6 +48,31 @@ extern ssize_t arch_cpu_release(const char *, size_t);
 #endif
 struct notifier_block;
 
+/*
+ * CPU notifier priorities.
+ */
+enum {
+	/*
+	 * SCHED_ACTIVE marks a cpu which is coming up active during
+	 * CPU_ONLINE and CPU_DOWN_FAILED and must be the first
+	 * notifier.  CPUSET_ACTIVE adjusts cpuset according to
+	 * cpu_active mask right after SCHED_ACTIVE.  During
+	 * CPU_DOWN_PREPARE, SCHED_INACTIVE and CPUSET_INACTIVE are
+	 * ordered in a similar way.
+	 *
+	 * This ordering guarantees a consistent cpu_active mask and
+	 * migration behavior to all cpu notifiers.
+	 */
+	CPU_PRI_SCHED_ACTIVE	= INT_MAX,
+	CPU_PRI_CPUSET_ACTIVE	= INT_MAX - 1,
+	CPU_PRI_SCHED_INACTIVE	= INT_MIN + 1,
+	CPU_PRI_CPUSET_INACTIVE	= INT_MIN,
+
+	/* migration should happen before other stuff but after perf */
+	CPU_PRI_PERF		= 20,
+	CPU_PRI_MIGRATION	= 10,
+};
+
 #ifdef CONFIG_SMP
 /* Need to know about CPUs going up/down? */
 #if defined(CONFIG_HOTPLUG_CPU) || !defined(MODULE)
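The ordering the comment above guarantees is what ordinary hotplug notifiers get to rely on: a default-priority (0) callback always runs after the ACTIVE notifiers on CPU_ONLINE, and before the INACTIVE ones (which sit at the very bottom of the priority range) on CPU_DOWN_PREPARE. A minimal sketch of such a consumer; the callback and variable names are illustrative, not part of this patch:

#include <linux/cpu.h>
#include <linux/kernel.h>
#include <linux/notifier.h>

static int example_cpu_callback(struct notifier_block *nb,
				unsigned long action, void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;

	switch (action & ~CPU_TASKS_FROZEN) {
	case CPU_ONLINE:
		/* CPU_PRI_SCHED_ACTIVE and CPU_PRI_CPUSET_ACTIVE already
		 * ran, so cpu is in cpu_active_mask at this point. */
		pr_info("cpu %u is online and active\n", cpu);
		break;
	case CPU_DOWN_PREPARE:
		/* SCHED_INACTIVE/CPUSET_INACTIVE have the lowest
		 * priorities, so cpu is still active here; it is
		 * deactivated only after all other notifiers ran. */
		pr_info("cpu %u is going down\n", cpu);
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block example_cpu_nb = {
	.notifier_call	= example_cpu_callback,
	.priority	= 0,	/* below CPU_PRI_MIGRATION */
};

Registration is the usual register_cpu_notifier(&example_cpu_nb) from an __init function.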
diff --git a/include/linux/cpuset.h b/include/linux/cpuset.h
index 457ed765a116..f20eb8f16025 100644
--- a/include/linux/cpuset.h
+++ b/include/linux/cpuset.h
@@ -20,6 +20,7 @@ extern int number_of_cpusets;	/* How many cpusets are defined in system? */
 
 extern int cpuset_init(void);
 extern void cpuset_init_smp(void);
+extern void cpuset_update_active_cpus(void);
 extern void cpuset_cpus_allowed(struct task_struct *p, struct cpumask *mask);
 extern int cpuset_cpus_allowed_fallback(struct task_struct *p);
 extern nodemask_t cpuset_mems_allowed(struct task_struct *p);
@@ -132,6 +133,11 @@ static inline void set_mems_allowed(nodemask_t nodemask)
 static inline int cpuset_init(void) { return 0; }
 static inline void cpuset_init_smp(void) {}
 
+static inline void cpuset_update_active_cpus(void)
+{
+	partition_sched_domains(1, NULL, NULL);
+}
+
 static inline void cpuset_cpus_allowed(struct task_struct *p,
 				       struct cpumask *mask)
 {
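On !CONFIG_CPUSETS kernels the new hook collapses to rebuilding the single scheduler domain set via partition_sched_domains(1, NULL, NULL), so callers need not distinguish the two configurations. A sketch of the expected caller, modeled on the scheduler-side notifier this interface pairs with (the function lives outside this diff; treat it as illustrative):

#include <linux/cpu.h>
#include <linux/cpuset.h>
#include <linux/notifier.h>

static int __cpuinit cpuset_cpu_active(struct notifier_block *nfb,
				       unsigned long action, void *hcpu)
{
	switch (action & ~CPU_TASKS_FROZEN) {
	case CPU_ONLINE:
	case CPU_DOWN_FAILED:
		/* cpu_active was already updated by the SCHED_ACTIVE
		 * notifier; propagate it to the cpuset hierarchy. */
		cpuset_update_active_cpus();
		return NOTIFY_OK;
	default:
		return NOTIFY_DONE;
	}
}

Registered at CPU_PRI_CPUSET_ACTIVE, this runs right after the scheduler's own notifier, which is exactly the ordering the cpu.h comment above promises.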
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index 5d0266d94985..469e03e96fe7 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -1068,7 +1068,7 @@ static inline void perf_event_disable(struct perf_event *event)	{ }
 #define perf_cpu_notifier(fn)					\
 do {								\
 	static struct notifier_block fn##_nb __cpuinitdata =	\
-		{ .notifier_call = fn, .priority = 20 };	\
+		{ .notifier_call = fn, .priority = CPU_PRI_PERF }; \
 	fn(&fn##_nb, (unsigned long)CPU_UP_PREPARE,		\
 		(void *)(unsigned long)smp_processor_id());	\
 	fn(&fn##_nb, (unsigned long)CPU_STARTING,		\
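Besides switching to the symbolic priority, perf_cpu_notifier() has the useful property of replaying the bring-up notifications for the calling CPU before registering the block (only the first two replays are visible in this hunk), so PMU code initialized after boot still covers already-online CPUs. A hypothetical user, with illustrative names:

#include <linux/cpu.h>
#include <linux/perf_event.h>

static int __cpuinit example_pmu_notifier(struct notifier_block *self,
					  unsigned long action, void *hcpu)
{
	switch (action & ~CPU_TASKS_FROZEN) {
	case CPU_UP_PREPARE:
		/* allocate per-cpu PMU state for the incoming cpu */
		break;
	case CPU_STARTING:
		/* runs on the new cpu itself: program the counters */
		break;
	}
	return NOTIFY_OK;
}

static int __init example_pmu_init(void)
{
	perf_cpu_notifier(example_pmu_notifier);
	return 0;
}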
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 747fcaedddb7..9a7bc5ba7e7e 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -271,14 +271,11 @@ extern int runqueue_is_locked(int cpu);
 
 extern cpumask_var_t nohz_cpu_mask;
 #if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ)
-extern int select_nohz_load_balancer(int cpu);
-extern int get_nohz_load_balancer(void);
+extern void select_nohz_load_balancer(int stop_tick);
+extern int get_nohz_timer_target(void);
 extern int nohz_ratelimit(int cpu);
 #else
-static inline int select_nohz_load_balancer(int cpu)
-{
-	return 0;
-}
+static inline void select_nohz_load_balancer(int stop_tick) { }
 
 static inline int nohz_ratelimit(int cpu)
 {
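The nohz change above turns select_nohz_load_balancer() from an idle-load-balancer election call taking a cpu number into a plain notification taking a stop_tick flag, with no return value. Illustrative wrappers showing the expected call pattern; the real call sites are in the nohz tick code, not in this diff:

static void example_enter_nohz_idle(void)
{
	/* this cpu is about to stop its periodic tick */
	select_nohz_load_balancer(1);
}

static void example_exit_nohz_idle(void)
{
	/* the tick is being restarted on this cpu */
	select_nohz_load_balancer(0);
}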
@@ -804,7 +801,7 @@ enum cpu_idle_type {
 #define SD_POWERSAVINGS_BALANCE	0x0100	/* Balance for power savings */
 #define SD_SHARE_PKG_RESOURCES	0x0200	/* Domain members share cpu pkg resources */
 #define SD_SERIALIZE		0x0400	/* Only a single load balancing instance */
-
+#define SD_ASYM_PACKING		0x0800	/* Place busy groups earlier in the domain */
 #define SD_PREFER_SIBLING	0x1000	/* Prefer to place tasks in a sibling domain */
 
 enum powersavings_balance_level {
@@ -839,6 +836,8 @@ static inline int sd_balance_for_package_power(void)
 	return SD_PREFER_SIBLING;
 }
 
+extern int __weak arch_sd_sibiling_asym_packing(void);
+
 /*
  * Optimise SD flags for power savings:
  * SD_BALANCE_NEWIDLE helps aggressive task consolidation and power savings.
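SD_ASYM_PACKING asks the load balancer to pack busy groups toward the lower-numbered CPUs of a domain, which matters on machines whose sibling threads are not symmetric in capability. An architecture opts in by overriding the weak hook declared above so the flag is OR'd into the sibling domain flags (see the topology.h hunk below). A sketch of such an override, with a hypothetical feature predicate standing in for the architecture's real test; note that the identifier must match the declaration's spelling exactly:

int arch_sd_sibiling_asym_packing(void)
{
	if (arch_has_asym_smt_threads())	/* hypothetical predicate */
		return SD_ASYM_PACKING;
	return 0;
}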
@@ -860,7 +859,7 @@ struct sched_group {
 	 * CPU power of this group, SCHED_LOAD_SCALE being max power for a
 	 * single CPU.
 	 */
-	unsigned int cpu_power;
+	unsigned int cpu_power, cpu_power_orig;
 
 	/*
 	 * The CPUs this group covers.
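cpu_power_orig records the group's capacity before runtime scaling (e.g. real-time activity) is applied to cpu_power, letting the balancer notice when a group's effective capacity has dropped well below what it started with. A sketch of the kind of comparison this enables (illustrative helper; the in-tree consumer is the small-capacity handling in the fair-scheduling code):

static inline int example_group_power_degraded(struct sched_group *group)
{
	/* true once less than roughly 90% of the original power remains */
	return group->cpu_power * 32 < group->cpu_power_orig * 29;
}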
@@ -1696,6 +1695,7 @@ extern void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t *st);
 #define PF_EXITING	0x00000004	/* getting shut down */
 #define PF_EXITPIDONE	0x00000008	/* pi exit done on shut down */
 #define PF_VCPU		0x00000010	/* I'm a virtual CPU */
+#define PF_WQ_WORKER	0x00000020	/* I'm a workqueue worker */
 #define PF_FORKNOEXEC	0x00000040	/* forked but didn't exec */
 #define PF_MCE_PROCESS	0x00000080	/* process policy on mce errors */
 #define PF_SUPERPRIV	0x00000100	/* used super-user privileges */
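PF_WQ_WORKER gives the scheduler a cheap way to recognize workqueue workers, so it can notify the workqueue code when such a task blocks and a replacement worker may need waking. A sketch of that hook; wq_worker_sleeping() is assumed from the concurrency-managed workqueue series this flag belongs to, and the wrapper plus the wake_up_process() fallback are illustrative simplifications:

static void example_notify_wq_on_sleep(struct task_struct *prev, int cpu)
{
	if (prev->flags & PF_WQ_WORKER) {
		struct task_struct *to_wakeup;

		to_wakeup = wq_worker_sleeping(prev, cpu);
		if (to_wakeup)
			wake_up_process(to_wakeup);
	}
}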
@@ -1790,20 +1790,23 @@ static inline int set_cpus_allowed(struct task_struct *p, cpumask_t new_mask)
 #endif
 
 /*
- * Architectures can set this to 1 if they have specified
- * CONFIG_HAVE_UNSTABLE_SCHED_CLOCK in their arch Kconfig,
- * but then during bootup it turns out that sched_clock()
- * is reliable after all:
+ * Do not use outside of architecture code which knows its limitations.
+ *
+ * sched_clock() has no promise of monotonicity or bounded drift between
+ * CPUs; using it (which you should not) requires disabling IRQs.
+ *
+ * Please use one of the three interfaces below.
  */
-#ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
-extern int sched_clock_stable;
-#endif
-
-/* ftrace calls sched_clock() directly */
 extern unsigned long long notrace sched_clock(void);
+/*
+ * See the comment in kernel/sched_clock.c
+ */
+extern u64 cpu_clock(int cpu);
+extern u64 local_clock(void);
+extern u64 sched_clock_cpu(int cpu);
+
 
 extern void sched_clock_init(void);
-extern u64 sched_clock_cpu(int cpu);
 
 #ifndef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
 static inline void sched_clock_tick(void)
@@ -1818,17 +1821,19 @@ static inline void sched_clock_idle_wakeup_event(u64 delta_ns)
 {
 }
 #else
+/*
+ * Architectures can set this to 1 if they have specified
+ * CONFIG_HAVE_UNSTABLE_SCHED_CLOCK in their arch Kconfig,
+ * but then during bootup it turns out that sched_clock()
+ * is reliable after all:
+ */
+extern int sched_clock_stable;
+
 extern void sched_clock_tick(void);
 extern void sched_clock_idle_sleep_event(void);
 extern void sched_clock_idle_wakeup_event(u64 delta_ns);
 #endif
 
-/*
- * For kernel-internal use: high-speed (but slightly incorrect) per-cpu
- * clock constructed from sched_clock():
- */
-extern unsigned long long cpu_clock(int cpu);
-
 extern unsigned long long
 task_sched_runtime(struct task_struct *task);
 extern unsigned long long thread_group_sched_runtime(struct task_struct *task);
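Of the three sanctioned interfaces introduced above, cpu_clock(cpu) and the new local_clock() take care of the IRQ-disabling that the comment warns raw sched_clock() use requires, falling back gracefully on architectures with an unstable clock; sched_clock_cpu() is the raw per-cpu view for callers that manage this themselves. A minimal usage sketch for timing a code section:

#include <linux/sched.h>

static u64 example_time_section(void)
{
	u64 t0, t1;

	t0 = local_clock();
	/* ... the work being measured ... */
	t1 = local_clock();

	return t1 - t0;	/* nanoseconds, as seen by this cpu's clock */
}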
diff --git a/include/linux/topology.h b/include/linux/topology.h
index c44df50a05ab..b572e432d2f3 100644
--- a/include/linux/topology.h
+++ b/include/linux/topology.h
@@ -103,6 +103,7 @@ int arch_update_cpu_topology(void);
 				| 1*SD_SHARE_PKG_RESOURCES	\
 				| 0*SD_SERIALIZE		\
 				| 0*SD_PREFER_SIBLING		\
+				| arch_sd_sibiling_asym_packing() \
 				,				\
 	.last_balance		= jiffies,			\
 	.balance_interval	= 1,				\
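Because the hook returns either SD_ASYM_PACKING or 0, the extra term above leaves SD_SIBLING_INIT unchanged on architectures that do not override it. A sketch of the generic weak default implied by the __weak declaration in sched.h (the in-tree default lives in the scheduler core, outside this diff):

int __weak arch_sd_sibiling_asym_packing(void)
{
	return 0*SD_ASYM_PACKING;
}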