author		Linus Torvalds <torvalds@linux-foundation.org>	2012-03-20 13:31:44 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2012-03-20 13:31:44 -0400
commit		2ba68940c893c8f0bfc8573c041254251bb6aeab (patch)
tree		fa83ebb01d32abd98123fa28f9f6f0b3eaeee25d /include
parent		9c2b957db1772ebf942ae7a9346b14eba6c8ca66 (diff)
parent		600e145882802d6ccbfe2c4aea243d97caeb91a9 (diff)
Merge branch 'sched-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull scheduler changes for v3.4 from Ingo Molnar
* 'sched-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (27 commits)
printk: Make it compile with !CONFIG_PRINTK
sched/x86: Fix overflow in cyc2ns_offset
sched: Fix nohz load accounting -- again!
sched: Update yield() docs
printk/sched: Introduce special printk_sched() for those awkward moments
sched/nohz: Correctly initialize 'next_balance' in 'nohz' idle balancer
sched: Cleanup cpu_active madness
sched: Fix load-balance wreckage
sched: Clean up parameter passing of proc_sched_autogroup_set_nice()
sched: Ditch per cgroup task lists for load-balancing
sched: Rename load-balancing fields
sched: Move load-balancing arguments into helper struct
sched/rt: Do not submit new work when PI-blocked
sched/rt: Prevent idle task boosting
sched/wait: Add __wake_up_all_locked() API
sched/rt: Document scheduler related skip-resched-check sites
sched/rt: Use schedule_preempt_disabled()
sched/rt: Add schedule_preempt_disabled()
sched/rt: Do not throttle when PI boosting
sched/rt: Keep period timer ticking when rt throttling is active
...
Diffstat (limited to 'include')
-rw-r--r--	include/linux/init_task.h	 2
-rw-r--r--	include/linux/kernel.h		13
-rw-r--r--	include/linux/preempt.h		 5
-rw-r--r--	include/linux/printk.h		10
-rw-r--r--	include/linux/sched.h		41
-rw-r--r--	include/linux/wait.h		 5
6 files changed, 65 insertions, 11 deletions
diff --git a/include/linux/init_task.h b/include/linux/init_task.h
index 9c66b1ada9d7..f994d51f70f2 100644
--- a/include/linux/init_task.h
+++ b/include/linux/init_task.h
@@ -149,7 +149,7 @@ extern struct cred init_cred;
 	},								\
 	.rt		= {						\
 		.run_list	= LIST_HEAD_INIT(tsk.rt.run_list),	\
-		.time_slice	= HZ,					\
+		.time_slice	= RR_TIMESLICE,				\
 		.nr_cpus_allowed = NR_CPUS,				\
 	},								\
 	.tasks		= LIST_HEAD_INIT(tsk.tasks),			\
diff --git a/include/linux/kernel.h b/include/linux/kernel.h
index e8343422240a..d801acb5e680 100644
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -85,6 +85,19 @@
 	}					\
 )
 
+/*
+ * Multiplies an integer by a fraction, while avoiding unnecessary
+ * overflow or loss of precision.
+ */
+#define mult_frac(x, numer, denom)(			\
+{							\
+	typeof(x) quot = (x) / (denom);			\
+	typeof(x) rem  = (x) % (denom);			\
+	(quot * (numer)) + ((rem * (numer)) / (denom));	\
+}							\
+)
+
+
 #define _RET_IP_		(unsigned long)__builtin_return_address(0)
 #define _THIS_IP_  ({ __label__ __here; __here: (unsigned long)&&__here; })
 
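The new mult_frac() helper computes x * numer / denom without letting the intermediate product x * numer overflow: it splits x into a quotient and a remainder with respect to denom and scales each part separately. It lands in the same series as the "sched/x86: Fix overflow in cyc2ns_offset" commit above. A worked example, with values chosen only for illustration:

	/* Naively, 4000000000UL * 3 overflows 32-bit arithmetic.
	 * mult_frac(4000000000UL, 3, 4) instead evaluates:
	 *   quot = 4000000000 / 4 = 1000000000
	 *   rem  = 4000000000 % 4 = 0
	 *   1000000000 * 3 + (0 * 3) / 4 = 3000000000
	 * which stays in range at every step.
	 */
	unsigned long scaled = mult_frac(4000000000UL, 3, 4);

The precision point is the remainder term: plain (x / denom) * numer would silently drop up to denom - 1 units of x before scaling.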
diff --git a/include/linux/preempt.h b/include/linux/preempt.h
index 58969b2a8a82..5a710b9c578e 100644
--- a/include/linux/preempt.h
+++ b/include/linux/preempt.h
@@ -48,12 +48,14 @@ do { \
 	barrier();					\
 } while (0)
 
-#define preempt_enable_no_resched() \
+#define sched_preempt_enable_no_resched() \
 do { \
 	barrier();					\
 	dec_preempt_count();				\
 } while (0)
 
+#define preempt_enable_no_resched()	sched_preempt_enable_no_resched()
+
 #define preempt_enable() \
 do { \
 	preempt_enable_no_resched();			\
@@ -92,6 +94,7 @@ do { \
 #else /* !CONFIG_PREEMPT_COUNT */
 
 #define preempt_disable()		do { } while (0)
+#define sched_preempt_enable_no_resched()	do { } while (0)
 #define preempt_enable_no_resched()	do { } while (0)
 #define preempt_enable()		do { } while (0)
 
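This splits "enable preemption but skip the reschedule check" in two: sched_preempt_enable_no_resched() for the scheduler itself, with preempt_enable_no_resched() kept as an alias for existing callers. The intended consumer is the new schedule_preempt_disabled() helper declared in the sched.h hunk below; its body in kernel/sched/core.c is essentially:

	/* Sketch of schedule_preempt_disabled(): enter schedule() with
	 * preemption disabled and return the same way.  Skipping the
	 * resched check is safe because schedule() runs next anyway. */
	void __sched schedule_preempt_disabled(void)
	{
		sched_preempt_enable_no_resched();
		schedule();
		preempt_disable();
	}

Keeping a scheduler-only name makes stray preempt_enable_no_resched() users easy to spot and convert to plain preempt_enable().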
diff --git a/include/linux/printk.h b/include/linux/printk.h
index f0e22f75143f..1f77a4174ee0 100644
--- a/include/linux/printk.h
+++ b/include/linux/printk.h
@@ -101,6 +101,11 @@ asmlinkage __printf(1, 2) __cold
 int printk(const char *fmt, ...);
 
 /*
+ * Special printk facility for scheduler use only, _DO_NOT_USE_ !
+ */
+__printf(1, 2) __cold int printk_sched(const char *fmt, ...);
+
+/*
  * Please don't use printk_ratelimit(), because it shares ratelimiting state
  * with all other unrelated printk_ratelimit() callsites.  Instead use
  * printk_ratelimited() or plain old __ratelimit().
@@ -127,6 +132,11 @@ int printk(const char *s, ...)
 {
 	return 0;
 }
+static inline __printf(1, 2) __cold
+int printk_sched(const char *s, ...)
+{
+	return 0;
+}
 static inline int printk_ratelimit(void)
 {
 	return 0;
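printk_sched() exists because a plain printk() can take console and wakeup locks that must not be taken from inside the scheduler, for example while holding a runqueue lock. Judging by the shortlog, the RT-throttling path is a first user; a hypothetical call site might look like:

	/* Hypothetical sketch: report throttling from scheduler context,
	 * where calling printk() directly could self-deadlock. */
	if (rt_throttled)		/* illustrative condition */
		printk_sched("sched: RT throttling activated\n");

The !CONFIG_PRINTK stub keeps such call sites compiling when printk support is configured out (the "Make it compile with !CONFIG_PRINTK" commit above).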
diff --git a/include/linux/sched.h b/include/linux/sched.h
index e345163da657..e074e1e54f85 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -361,6 +361,7 @@ extern signed long schedule_timeout_interruptible(signed long timeout);
 extern signed long schedule_timeout_killable(signed long timeout);
 extern signed long schedule_timeout_uninterruptible(signed long timeout);
 asmlinkage void schedule(void);
+extern void schedule_preempt_disabled(void);
 extern int mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner);
 
 struct nsproxy;
@@ -905,6 +906,7 @@ struct sched_group_power {
 	 * single CPU.
 	 */
 	unsigned int power, power_orig;
+	unsigned long next_update;
 	/*
 	 * Number of busy cpus in this group.
 	 */
@@ -1052,6 +1054,8 @@ static inline int test_sd_parent(struct sched_domain *sd, int flag)
 unsigned long default_scale_freq_power(struct sched_domain *sd, int cpu);
 unsigned long default_scale_smt_power(struct sched_domain *sd, int cpu);
 
+bool cpus_share_cache(int this_cpu, int that_cpu);
+
 #else /* CONFIG_SMP */
 
 struct sched_domain_attr;
@@ -1061,6 +1065,12 @@ partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[],
 			struct sched_domain_attr *dattr_new)
 {
 }
+
+static inline bool cpus_share_cache(int this_cpu, int that_cpu)
+{
+	return true;
+}
+
 #endif /* !CONFIG_SMP */
 
 
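cpus_share_cache() asks whether two CPUs sit under a common cache domain; the !SMP stub answers true, since a single CPU trivially shares its own cache. The kind of decision this enables can be sketched roughly as follows (the wake_* helpers are made up for illustration, not kernel APIs):

	/* Hypothetical sketch: pick a cheap wakeup when waker and target
	 * share a cache, and a remote mechanism (e.g. an IPI) otherwise. */
	static void queue_wakeup(struct task_struct *p, int target_cpu)
	{
		if (cpus_share_cache(smp_processor_id(), target_cpu))
			wake_local(p, target_cpu);	/* hypothetical */
		else
			wake_remote(p, target_cpu);	/* hypothetical */
	}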
@@ -1225,6 +1235,12 @@ struct sched_rt_entity {
 #endif
 };
 
+/*
+ * default timeslice is 100 msecs (used only for SCHED_RR tasks).
+ * Timeslices get refilled after they expire.
+ */
+#define RR_TIMESLICE		(100 * HZ / 1000)
+
 struct rcu_node;
 
 enum perf_event_task_context {
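RR_TIMESLICE expresses the 100 ms SCHED_RR default in scheduler ticks, so the value tracks the configured HZ. This is what the init_task.h hunk above switches to: the old .time_slice = HZ meant a full second, ten times the intended slice. Worked out for common HZ values:

	/* RR_TIMESLICE = 100 * HZ / 1000, i.e. 100 ms in ticks:
	 *   HZ = 1000:  100 * 1000 / 1000 = 100 ticks
	 *   HZ =  250:  100 *  250 / 1000 =  25 ticks
	 *   HZ =  100:  100 *  100 / 1000 =  10 ticks
	 * Each case is 100 ms of wall clock; the old HZ value was 1000 ms.
	 */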
@@ -2047,7 +2063,7 @@ extern void sched_autogroup_fork(struct signal_struct *sig);
 extern void sched_autogroup_exit(struct signal_struct *sig);
 #ifdef CONFIG_PROC_FS
 extern void proc_sched_autogroup_show_task(struct task_struct *p, struct seq_file *m);
-extern int proc_sched_autogroup_set_nice(struct task_struct *p, int *nice);
+extern int proc_sched_autogroup_set_nice(struct task_struct *p, int nice);
 #endif
 #else
 static inline void sched_autogroup_create_attach(struct task_struct *p) { }
@@ -2064,12 +2080,20 @@ extern unsigned int sysctl_sched_cfs_bandwidth_slice;
 extern int rt_mutex_getprio(struct task_struct *p);
 extern void rt_mutex_setprio(struct task_struct *p, int prio);
 extern void rt_mutex_adjust_pi(struct task_struct *p);
+static inline bool tsk_is_pi_blocked(struct task_struct *tsk)
+{
+	return tsk->pi_blocked_on != NULL;
+}
 #else
 static inline int rt_mutex_getprio(struct task_struct *p)
 {
 	return p->normal_prio;
 }
 # define rt_mutex_adjust_pi(p) do { } while (0)
+static inline bool tsk_is_pi_blocked(struct task_struct *tsk)
+{
+	return false;
+}
 #endif
 
 extern bool yield_to(struct task_struct *p, bool preempt);
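tsk_is_pi_blocked() reports whether a task is currently blocked on an rt_mutex for priority inheritance; without rt-mutexes the answer is always false. Its role in the "sched/rt: Do not submit new work when PI-blocked" commit can be sketched roughly as follows (the helper name and body here are illustrative, not the actual kernel code):

	/* Hypothetical sketch: before sleeping, a task normally flushes
	 * deferred work such as plugged block I/O.  Skip that while
	 * PI-blocked, where starting new work is unsafe. */
	static void submit_pending_work(struct task_struct *tsk)
	{
		if (!tsk->state || tsk_is_pi_blocked(tsk))
			return;
		/* ... flush plugged I/O here ... */
	}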
@@ -2388,12 +2412,15 @@ static inline void task_unlock(struct task_struct *p)
 extern struct sighand_struct *__lock_task_sighand(struct task_struct *tsk,
 							unsigned long *flags);
 
-#define lock_task_sighand(tsk, flags)					\
-({	struct sighand_struct *__ss;					\
-	__cond_lock(&(tsk)->sighand->siglock,				\
-		    (__ss = __lock_task_sighand(tsk, flags)));		\
-	__ss;								\
-})									\
+static inline struct sighand_struct *lock_task_sighand(struct task_struct *tsk,
+						       unsigned long *flags)
+{
+	struct sighand_struct *ret;
+
+	ret = __lock_task_sighand(tsk, flags);
+	(void)__cond_lock(&tsk->sighand->siglock, ret);
+	return ret;
+}
 
 static inline void unlock_task_sighand(struct task_struct *tsk,
 				       unsigned long *flags)
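The macro-to-inline conversion preserves the calling convention: the helper takes tsk's siglock with interrupts saved into *flags and returns NULL if the task has no sighand (the __cond_lock() cast only informs sparse's lock checking). A typical caller pattern, sketched for illustration:

	/* Hypothetical usage sketch: a NULL return means the task is
	 * exiting and the lock was not taken. */
	unsigned long flags;
	struct sighand_struct *sighand;

	sighand = lock_task_sighand(task, &flags);
	if (sighand) {
		/* ... examine or modify signal state under siglock ... */
		unlock_task_sighand(task, &flags);
	}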
diff --git a/include/linux/wait.h b/include/linux/wait.h
index a9ce45e8501c..7d9a9e990ce6 100644
--- a/include/linux/wait.h
+++ b/include/linux/wait.h
@@ -157,7 +157,7 @@ void __wake_up(wait_queue_head_t *q, unsigned int mode, int nr, void *key);
 void __wake_up_locked_key(wait_queue_head_t *q, unsigned int mode, void *key);
 void __wake_up_sync_key(wait_queue_head_t *q, unsigned int mode, int nr,
 			void *key);
-void __wake_up_locked(wait_queue_head_t *q, unsigned int mode);
+void __wake_up_locked(wait_queue_head_t *q, unsigned int mode, int nr);
 void __wake_up_sync(wait_queue_head_t *q, unsigned int mode, int nr);
 void __wake_up_bit(wait_queue_head_t *, void *, int);
 int __wait_on_bit(wait_queue_head_t *, struct wait_bit_queue *, int (*)(void *), unsigned);
@@ -170,7 +170,8 @@ wait_queue_head_t *bit_waitqueue(void *, int);
 #define wake_up(x)			__wake_up(x, TASK_NORMAL, 1, NULL)
 #define wake_up_nr(x, nr)		__wake_up(x, TASK_NORMAL, nr, NULL)
 #define wake_up_all(x)			__wake_up(x, TASK_NORMAL, 0, NULL)
-#define wake_up_locked(x)		__wake_up_locked((x), TASK_NORMAL)
+#define wake_up_locked(x)		__wake_up_locked((x), TASK_NORMAL, 1)
+#define wake_up_all_locked(x)		__wake_up_locked((x), TASK_NORMAL, 0)
 
 #define wake_up_interruptible(x)	__wake_up(x, TASK_INTERRUPTIBLE, 1, NULL)
 #define wake_up_interruptible_nr(x, nr)	__wake_up(x, TASK_INTERRUPTIBLE, nr, NULL)
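__wake_up_locked() gains the same nr_exclusive convention as __wake_up(): 1 wakes one exclusive waiter, 0 wakes them all, which is what the new wake_up_all_locked() expands to. For a caller that already holds the waitqueue lock, for instance because it also guards the condition being signalled, a sketch might look like:

	/* Hypothetical sketch: flip the condition and wake every waiter
	 * under a single hold of the waitqueue lock. */
	static void complete_all_waiters(wait_queue_head_t *q, bool *done)
	{
		unsigned long flags;

		spin_lock_irqsave(&q->lock, flags);
		*done = true;			/* guarded by q->lock */
		wake_up_all_locked(q);		/* lock held: no re-take */
		spin_unlock_irqrestore(&q->lock, flags);
	}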