author		Mark Brown <broonie@opensource.wolfsonmicro.com>	2010-08-16 13:42:58 -0400
committer	Mark Brown <broonie@opensource.wolfsonmicro.com>	2010-08-16 13:42:58 -0400
commit		e4862f2f6f5653dfb67f3ba2b6f0bc74516ed51a (patch)
tree		1db5a0540a4eecfad9b7daee476b985e82ddc810 /include/linux/sched.h
parent		ec62dbd7eb8e3dddb221da89ecbcea0fc3dee8c1 (diff)
parent		b2c1e07b81a126e5846dfc3d36f559d861df59f4 (diff)
Merge branch 'for-2.6.36' into for-2.6.37
Fairly simple conflicts; the most serious ones are the i.MX ones, which I
suspect now need another rename.
Conflicts:
arch/arm/mach-mx2/clock_imx27.c
arch/arm/mach-mx2/devices.c
arch/arm/mach-omap2/board-rx51-peripherals.c
arch/arm/mach-omap2/board-zoom2.c
sound/soc/fsl/mpc5200_dma.c
sound/soc/fsl/mpc5200_dma.h
sound/soc/fsl/mpc8610_hpcd.c
sound/soc/pxa/spitz.c
Diffstat (limited to 'include/linux/sched.h')
-rw-r--r--	include/linux/sched.h	89
1 file changed, 37 insertions(+), 52 deletions(-)
diff --git a/include/linux/sched.h b/include/linux/sched.h
index f118809c953f..ce160d68f5e7 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -139,7 +139,7 @@ extern int nr_processes(void);
 extern unsigned long nr_running(void);
 extern unsigned long nr_uninterruptible(void);
 extern unsigned long nr_iowait(void);
-extern unsigned long nr_iowait_cpu(void);
+extern unsigned long nr_iowait_cpu(int cpu);
 extern unsigned long this_cpu_load(void);
 
 
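The hunk above changes nr_iowait_cpu() to take an explicit CPU number instead of implicitly using the current one. A minimal kernel-side sketch of what the new signature allows (hypothetical caller, not part of this commit):

#include <linux/cpumask.h>
#include <linux/sched.h>

static unsigned long total_iowait_tasks(void)
{
	unsigned long sum = 0;
	int cpu;

	/* With the new signature the io-wait count can be summed
	 * across CPUs, not just read for the local one. */
	for_each_online_cpu(cpu)
		sum += nr_iowait_cpu(cpu);

	return sum;
}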
@@ -214,6 +214,7 @@ extern char ___assert_task_state[1 - 2*!!(
 
 #define task_is_traced(task)	((task->state & __TASK_TRACED) != 0)
 #define task_is_stopped(task)	((task->state & __TASK_STOPPED) != 0)
+#define task_is_dead(task)	((task)->exit_state != 0)
 #define task_is_stopped_or_traced(task)	\
 	((task->state & (__TASK_STOPPED | __TASK_TRACED)) != 0)
 #define task_contributes_to_load(task)	\
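The new task_is_dead() helper tests exit_state rather than the state bitmask. A hedged usage sketch (hypothetical predicate, not from this commit):

#include <linux/sched.h>

/* Hypothetical helper: true once the task has recorded a non-zero
 * exit_state, i.e. it has exited but not yet been reaped. */
static int skip_exited(struct task_struct *p)
{
	return task_is_dead(p);
}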
@@ -271,19 +272,10 @@ extern int runqueue_is_locked(int cpu);
 
 extern cpumask_var_t nohz_cpu_mask;
 #if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ)
-extern int select_nohz_load_balancer(int cpu);
-extern int get_nohz_load_balancer(void);
-extern int nohz_ratelimit(int cpu);
+extern void select_nohz_load_balancer(int stop_tick);
+extern int get_nohz_timer_target(void);
 #else
-static inline int select_nohz_load_balancer(int cpu)
-{
-	return 0;
-}
-
-static inline int nohz_ratelimit(int cpu)
-{
-	return 0;
-}
+static inline void select_nohz_load_balancer(int stop_tick) { }
 #endif
 
 /*
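Note that select_nohz_load_balancer()'s parameter changes meaning here: it is now a stop_tick flag rather than a CPU number, and the call no longer returns a value. A sketch of the implied calling convention (hypothetical callers, not part of this commit):

#include <linux/sched.h>

static void cpu_enters_tickless_idle(void)
{
	/* This CPU is stopping its periodic tick. */
	select_nohz_load_balancer(1);
}

static void cpu_restarts_tick(void)
{
	/* The tick is running again on this CPU. */
	select_nohz_load_balancer(0);
}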
@@ -315,20 +307,16 @@ extern void scheduler_tick(void);
 
 extern void sched_show_task(struct task_struct *p);
 
-#ifdef CONFIG_DETECT_SOFTLOCKUP
-extern void softlockup_tick(void);
+#ifdef CONFIG_LOCKUP_DETECTOR
 extern void touch_softlockup_watchdog(void);
 extern void touch_softlockup_watchdog_sync(void);
 extern void touch_all_softlockup_watchdogs(void);
-extern int proc_dosoftlockup_thresh(struct ctl_table *table, int write,
+extern int proc_dowatchdog_thresh(struct ctl_table *table, int write,
				    void __user *buffer,
				    size_t *lenp, loff_t *ppos);
 extern unsigned int softlockup_panic;
 extern int softlockup_thresh;
 #else
-static inline void softlockup_tick(void)
-{
-}
 static inline void touch_softlockup_watchdog(void)
 {
 }
@@ -633,7 +621,8 @@ struct signal_struct {
	struct tty_audit_buf *tty_audit_buf;
 #endif
 
	int oom_adj;		/* OOM kill score adjustment (bit shift) */
+	int oom_score_adj;	/* OOM kill score adjustment */
 };
 
 /* Context switch must be unlocked if interrupts are to be enabled */
@@ -804,7 +793,7 @@ enum cpu_idle_type {
 #define SD_POWERSAVINGS_BALANCE	0x0100	/* Balance for power savings */
 #define SD_SHARE_PKG_RESOURCES	0x0200	/* Domain members share cpu pkg resources */
 #define SD_SERIALIZE		0x0400	/* Only a single load balancing instance */
-
+#define SD_ASYM_PACKING		0x0800	/* Place busy groups earlier in the domain */
 #define SD_PREFER_SIBLING	0x1000	/* Prefer to place tasks in a sibling domain */
 
 enum powersavings_balance_level {
@@ -839,6 +828,8 @@ static inline int sd_balance_for_package_power(void)
	return SD_PREFER_SIBLING;
 }
 
+extern int __weak arch_sd_sibiling_asym_packing(void);
+
 /*
  * Optimise SD flags for power savings:
  * SD_BALANCE_NEWIDLE helps agressive task consolidation and power savings.
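arch_sd_sibiling_asym_packing() (the misspelling is in the source identifier itself) is a weak hook an architecture can override to request the new SD_ASYM_PACKING behaviour. A hedged sketch of such an override (hypothetical, not from this commit):

#include <linux/sched.h>

/* Hypothetical arch-code override of the weak default: returning
 * SD_ASYM_PACKING asks the scheduler to pack busy groups onto
 * lower-numbered siblings in the domain. */
int arch_sd_sibiling_asym_packing(void)
{
	return SD_ASYM_PACKING;
}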
@@ -860,7 +851,7 @@ struct sched_group {
	 * CPU power of this group, SCHED_LOAD_SCALE being max power for a
	 * single CPU.
	 */
-	unsigned int cpu_power;
+	unsigned int cpu_power, cpu_power_orig;
 
	/*
	 * The CPUs this group covers.
@@ -1696,6 +1687,7 @@ extern void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t *
 #define PF_EXITING	0x00000004	/* getting shut down */
 #define PF_EXITPIDONE	0x00000008	/* pi exit done on shut down */
 #define PF_VCPU		0x00000010	/* I'm a virtual CPU */
+#define PF_WQ_WORKER	0x00000020	/* I'm a workqueue worker */
 #define PF_FORKNOEXEC	0x00000040	/* forked but didn't exec */
 #define PF_MCE_PROCESS	0x00000080	/* process policy on mce errors */
 #define PF_SUPERPRIV	0x00000100	/* used super-user privileges */
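PF_WQ_WORKER marks workqueue worker threads in task->flags. A minimal sketch of testing the new flag (hypothetical helper name, not from this commit):

#include <linux/sched.h>

/* Hypothetical predicate: is the current task a workqueue worker? */
static int current_is_wq_worker(void)
{
	return (current->flags & PF_WQ_WORKER) != 0;
}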
@@ -1790,20 +1782,23 @@ static inline int set_cpus_allowed(struct task_struct *p, cpumask_t new_mask)
 #endif
 
 /*
- * Architectures can set this to 1 if they have specified
- * CONFIG_HAVE_UNSTABLE_SCHED_CLOCK in their arch Kconfig,
- * but then during bootup it turns out that sched_clock()
- * is reliable after all:
+ * Do not use outside of architecture code which knows its limitations.
+ *
+ * sched_clock() has no promise of monotonicity or bounded drift between
+ * CPUs, use (which you should not) requires disabling IRQs.
+ *
+ * Please use one of the three interfaces below.
  */
-#ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
-extern int sched_clock_stable;
-#endif
-
-/* ftrace calls sched_clock() directly */
 extern unsigned long long notrace sched_clock(void);
+/*
+ * See the comment in kernel/sched_clock.c
+ */
+extern u64 cpu_clock(int cpu);
+extern u64 local_clock(void);
+extern u64 sched_clock_cpu(int cpu);
+
 
 extern void sched_clock_init(void);
-extern u64 sched_clock_cpu(int cpu);
 
 #ifndef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
 static inline void sched_clock_tick(void)
@@ -1818,17 +1813,19 @@ static inline void sched_clock_idle_wakeup_event(u64 delta_ns)
 {
 }
 #else
+/*
+ * Architectures can set this to 1 if they have specified
+ * CONFIG_HAVE_UNSTABLE_SCHED_CLOCK in their arch Kconfig,
+ * but then during bootup it turns out that sched_clock()
+ * is reliable after all:
+ */
+extern int sched_clock_stable;
+
 extern void sched_clock_tick(void);
 extern void sched_clock_idle_sleep_event(void);
 extern void sched_clock_idle_wakeup_event(u64 delta_ns);
 #endif
 
-/*
- * For kernel-internal use: high-speed (but slightly incorrect) per-cpu
- * clock constructed from sched_clock():
- */
-extern unsigned long long cpu_clock(int cpu);
-
 extern unsigned long long
 task_sched_runtime(struct task_struct *task);
 extern unsigned long long thread_group_sched_runtime(struct task_struct *task);
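The reworked comment block above steers users to three wrappers (cpu_clock(), local_clock(), sched_clock_cpu()) instead of raw sched_clock(). A hedged sketch of the sanctioned usage (hypothetical timing helper, not from this commit):

#include <linux/sched.h>

/* Time a callback using local_clock(), one of the three interfaces
 * the new comment recommends; raw sched_clock() stays arch-internal. */
static u64 time_callback_ns(void (*fn)(void))
{
	u64 t0 = local_clock();

	fn();
	return local_clock() - t0;
}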
@@ -2434,18 +2431,6 @@ static inline void set_task_cpu(struct task_struct *p, unsigned int cpu)
 
 #endif /* CONFIG_SMP */
 
-#ifdef CONFIG_TRACING
-extern void
-__trace_special(void *__tr, void *__data,
-		unsigned long arg1, unsigned long arg2, unsigned long arg3);
-#else
-static inline void
-__trace_special(void *__tr, void *__data,
-		unsigned long arg1, unsigned long arg2, unsigned long arg3)
-{
-}
-#endif
-
 extern long sched_setaffinity(pid_t pid, const struct cpumask *new_mask);
 extern long sched_getaffinity(pid_t pid, struct cpumask *mask);
 