author		Linus Torvalds <torvalds@linux-foundation.org>	2012-03-20 13:31:44 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2012-03-20 13:31:44 -0400
commit		2ba68940c893c8f0bfc8573c041254251bb6aeab (patch)
tree		fa83ebb01d32abd98123fa28f9f6f0b3eaeee25d /include/linux/sched.h
parent		9c2b957db1772ebf942ae7a9346b14eba6c8ca66 (diff)
parent		600e145882802d6ccbfe2c4aea243d97caeb91a9 (diff)
Merge branch 'sched-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull scheduler changes for v3.4 from Ingo Molnar

* 'sched-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (27 commits)
  printk: Make it compile with !CONFIG_PRINTK
  sched/x86: Fix overflow in cyc2ns_offset
  sched: Fix nohz load accounting -- again!
  sched: Update yield() docs
  printk/sched: Introduce special printk_sched() for those awkward moments
  sched/nohz: Correctly initialize 'next_balance' in 'nohz' idle balancer
  sched: Cleanup cpu_active madness
  sched: Fix load-balance wreckage
  sched: Clean up parameter passing of proc_sched_autogroup_set_nice()
  sched: Ditch per cgroup task lists for load-balancing
  sched: Rename load-balancing fields
  sched: Move load-balancing arguments into helper struct
  sched/rt: Do not submit new work when PI-blocked
  sched/rt: Prevent idle task boosting
  sched/wait: Add __wake_up_all_locked() API
  sched/rt: Document scheduler related skip-resched-check sites
  sched/rt: Use schedule_preempt_disabled()
  sched/rt: Add schedule_preempt_disabled()
  sched/rt: Do not throttle when PI boosting
  sched/rt: Keep period timer ticking when rt throttling is active
  ...
Diffstat (limited to 'include/linux/sched.h')
-rw-r--r--	include/linux/sched.h	41
1 file changed, 34 insertions(+), 7 deletions(-)
diff --git a/include/linux/sched.h b/include/linux/sched.h
index e345163da657..e074e1e54f85 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -361,6 +361,7 @@ extern signed long schedule_timeout_interruptible(signed long timeout);
 extern signed long schedule_timeout_killable(signed long timeout);
 extern signed long schedule_timeout_uninterruptible(signed long timeout);
 asmlinkage void schedule(void);
+extern void schedule_preempt_disabled(void);
 extern int mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner);
 
 struct nsproxy;
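
The schedule_preempt_disabled() declaration above pairs with the 'sched/rt: Add schedule_preempt_disabled()' commit in the merge log. It is aimed at callers (idle loops, mutex and rt-mutex slowpaths) that must enter schedule() with preemption disabled and want it disabled again on return. A minimal sketch of the expected behavior, assuming it simply wraps the previously open-coded sequence; this is not the verbatim kernel implementation:

	/*
	 * Sketch: enter with preempt_count == 1, return with preemption
	 * disabled again.
	 */
	void __sched schedule_preempt_disabled(void)
	{
		sched_preempt_enable_no_resched();	/* drop the count without a resched check */
		schedule();
		preempt_disable();
	}
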
@@ -905,6 +906,7 @@ struct sched_group_power {
 	 * single CPU.
 	 */
 	unsigned int power, power_orig;
+	unsigned long next_update;
 	/*
 	 * Number of busy cpus in this group.
 	 */
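
The next_update field added to struct sched_group_power gives the load balancer a jiffies timestamp so a group's cpu power can be refreshed on a time basis rather than on every balance pass (see 'sched: Fix load-balance wreckage' in the merge log). An illustrative consumer, hedged because the real call site lives in kernel/sched/fair.c and is not part of this header diff:

	/* Illustrative only: recompute group power once the deadline has passed. */
	if (time_after_eq(jiffies, group->sgp->next_update))
		update_group_power(sd, this_cpu);	/* also pushes next_update forward */
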
@@ -1052,6 +1054,8 @@ static inline int test_sd_parent(struct sched_domain *sd, int flag)
 unsigned long default_scale_freq_power(struct sched_domain *sd, int cpu);
 unsigned long default_scale_smt_power(struct sched_domain *sd, int cpu);
 
+bool cpus_share_cache(int this_cpu, int that_cpu);
+
 #else /* CONFIG_SMP */
 
 struct sched_domain_attr;
@@ -1061,6 +1065,12 @@ partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[],
 			struct sched_domain_attr *dattr_new)
 {
 }
+
+static inline bool cpus_share_cache(int this_cpu, int that_cpu)
+{
+	return true;
+}
+
 #endif	/* !CONFIG_SMP */
 
 
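
cpus_share_cache() reports whether two CPUs sit under a common cache domain; the !CONFIG_SMP stub above can trivially return true because there is only one CPU. A hypothetical caller sketch (the names below are illustrative, not taken from this diff) showing the kind of decision the wakeup and load-balancing paths can base on it:

	/* Illustrative helper: prefer a candidate whose cache is shared with this_cpu. */
	static int pick_cache_hot_cpu(int this_cpu, int candidate_cpu)
	{
		if (cpus_share_cache(this_cpu, candidate_cpu))
			return candidate_cpu;	/* data is likely still hot in the shared cache */
		return this_cpu;
	}
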
@@ -1225,6 +1235,12 @@ struct sched_rt_entity {
 #endif
 };
 
+/*
+ * default timeslice is 100 msecs (used only for SCHED_RR tasks).
+ * Timeslices get refilled after they expire.
+ */
+#define RR_TIMESLICE		(100 * HZ / 1000)
+
 struct rcu_node;
 
 enum perf_event_task_context {
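
Because RR_TIMESLICE is defined in jiffies, its numeric value tracks HZ while always representing roughly 100 ms of wall time: 100 ticks at HZ=1000, 25 at HZ=250, 10 at HZ=100. A small sanity-check sketch using the standard jiffies conversion helper:

	/* Sketch: the default SCHED_RR slice is ~100 ms regardless of HZ. */
	unsigned int rr_slice_ms = jiffies_to_msecs(RR_TIMESLICE);
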
@@ -2047,7 +2063,7 @@ extern void sched_autogroup_fork(struct signal_struct *sig);
 extern void sched_autogroup_exit(struct signal_struct *sig);
 #ifdef CONFIG_PROC_FS
 extern void proc_sched_autogroup_show_task(struct task_struct *p, struct seq_file *m);
-extern int proc_sched_autogroup_set_nice(struct task_struct *p, int *nice);
+extern int proc_sched_autogroup_set_nice(struct task_struct *p, int nice);
 #endif
 #else
 static inline void sched_autogroup_create_attach(struct task_struct *p) { }
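
The one-line change above is the 'sched: Clean up parameter passing of proc_sched_autogroup_set_nice()' commit from the merge log: the nice value is now passed by value and any failure is reported through the return code instead of through the pointer argument. A hedged sketch of the resulting caller shape (the real caller is the /proc autogroup write handler, which is not part of this diff):

	/* Illustrative caller: nice by value, error via the return code. */
	static int apply_autogroup_nice(struct task_struct *p, int nice)
	{
		int err;

		err = proc_sched_autogroup_set_nice(p, nice);	/* 0 or -errno */
		if (err)
			return err;
		return 0;
	}
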
@@ -2064,12 +2080,20 @@ extern unsigned int sysctl_sched_cfs_bandwidth_slice;
 extern int rt_mutex_getprio(struct task_struct *p);
 extern void rt_mutex_setprio(struct task_struct *p, int prio);
 extern void rt_mutex_adjust_pi(struct task_struct *p);
+static inline bool tsk_is_pi_blocked(struct task_struct *tsk)
+{
+	return tsk->pi_blocked_on != NULL;
+}
 #else
 static inline int rt_mutex_getprio(struct task_struct *p)
 {
 	return p->normal_prio;
 }
 # define rt_mutex_adjust_pi(p)		do { } while (0)
+static inline bool tsk_is_pi_blocked(struct task_struct *tsk)
+{
+	return false;
+}
 #endif
 
 extern bool yield_to(struct task_struct *p, bool preempt);
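
tsk_is_pi_blocked() tells the scheduler core whether a task is currently blocked on an rt-mutex for priority inheritance; the stub in the #else branch simply returns false. Per 'sched/rt: Do not submit new work when PI-blocked' in the merge log, the point is to skip deferred-work submission (such as flushing a plugged block queue) when a PI-blocked task schedules, since that could recurse into locking code. A sketch of that check, hedged because the actual call site is in the scheduler core rather than in this header:

	/* Sketch: skip plug flushing when the task blocks for priority inheritance. */
	static inline void sched_submit_work(struct task_struct *tsk)
	{
		if (!tsk->state || tsk_is_pi_blocked(tsk))
			return;
		if (blk_needs_flush_plug(tsk))
			blk_schedule_flush_plug(tsk);
	}
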
@@ -2388,12 +2412,15 @@ static inline void task_unlock(struct task_struct *p)
 extern struct sighand_struct *__lock_task_sighand(struct task_struct *tsk,
 							unsigned long *flags);
 
-#define lock_task_sighand(tsk, flags)					\
-({	struct sighand_struct *__ss;					\
-	__cond_lock(&(tsk)->sighand->siglock,				\
-		    (__ss = __lock_task_sighand(tsk, flags)));		\
-	__ss;								\
-})									\
+static inline struct sighand_struct *lock_task_sighand(struct task_struct *tsk,
+						       unsigned long *flags)
+{
+	struct sighand_struct *ret;
+
+	ret = __lock_task_sighand(tsk, flags);
+	(void)__cond_lock(&tsk->sighand->siglock, ret);
+	return ret;
+}
 
 static inline void unlock_task_sighand(struct task_struct *tsk,
 				       unsigned long *flags)
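
The final hunk converts lock_task_sighand() from a statement-expression macro into a static inline, keeping the __cond_lock() annotation so sparse still sees the conditional lock acquisition while giving callers a real prototype. The calling convention is unchanged; a hedged example of the usual pattern:

	/* Illustrative caller: a NULL return means the task has no sighand (it is exiting). */
	static void inspect_task_signals(struct task_struct *task)
	{
		struct sighand_struct *sighand;
		unsigned long flags;

		sighand = lock_task_sighand(task, &flags);
		if (!sighand)
			return;
		/* ->sighand and ->signal are stable while siglock is held */
		unlock_task_sighand(task, &flags);
	}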