author		Linus Torvalds <torvalds@linux-foundation.org>	2008-10-10 15:42:31 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2008-10-10 15:42:31 -0400
commit		b11ce8a26d26ed9019a8803aa90d580b52f23e79 (patch)
tree		332f7b59487335229119c0ede371af3a9783d577 /include
parent		f6bccf695431da0e9bd773550ae91b8cb9ffb227 (diff)
parent		a5d8c3483a6e19aca95ef6a2c5890e33bfa5b293 (diff)
Merge branch 'sched-v28-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip
* 'sched-v28-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip: (38 commits)
  sched debug: add name to sched_domain sysctl entries
  sched: sync wakeups vs avg_overlap
  sched: remove redundant code in cpu_cgroup_create()
  sched_rt.c: resch needed in rt_rq_enqueue() for the root rt_rq
  cpusets: scan_for_empty_cpusets(), cpuset doesn't seem to be so const
  sched: minor optimizations in wake_affine and select_task_rq_fair
  sched: maintain only task entities in cfs_rq->tasks list
  sched: fixup buddy selection
  sched: more sanity checks on the bandwidth settings
  sched: add some comments to the bandwidth code
  sched: fixlet for group load balance
  sched: rework wakeup preemption
  CFS scheduler: documentation about scheduling policies
  sched: clarify ifdef tangle
  sched: fix list traversal to use _rcu variant
  sched: turn off WAKEUP_OVERLAP
  sched: wakeup preempt when small overlap
  kernel/cpu.c: create a CPU_STARTING cpu_chain notifier
  kernel/cpu.c: Move the CPU_DYING notifiers
  sched: fix __load_balance_iterator() for cfq with only one task
  ...
Diffstat (limited to 'include')
-rw-r--r--	include/linux/completion.h	41
-rw-r--r--	include/linux/cpu.h		1
-rw-r--r--	include/linux/notifier.h	10
-rw-r--r--	include/linux/proportions.h	2
-rw-r--r--	include/linux/sched.h		9
5 files changed, 58 insertions, 5 deletions
diff --git a/include/linux/completion.h b/include/linux/completion.h
index 02ef8835999c..4a6b604ef7e4 100644
--- a/include/linux/completion.h
+++ b/include/linux/completion.h
@@ -10,6 +10,18 @@
 
 #include <linux/wait.h>
 
+/**
+ * struct completion - structure used to maintain state for a "completion"
+ *
+ * This is the opaque structure used to maintain the state for a "completion".
+ * Completions currently use a FIFO to queue threads that have to wait for
+ * the "completion" event.
+ *
+ * See also: complete(), wait_for_completion() (and friends _timeout,
+ * _interruptible, _interruptible_timeout, and _killable), init_completion(),
+ * and macros DECLARE_COMPLETION(), DECLARE_COMPLETION_ONSTACK(), and
+ * INIT_COMPLETION().
+ */
 struct completion {
 	unsigned int done;
 	wait_queue_head_t wait;
@@ -21,6 +33,14 @@ struct completion {
 #define COMPLETION_INITIALIZER_ONSTACK(work) \
 	({ init_completion(&work); work; })
 
+/**
+ * DECLARE_COMPLETION: - declare and initialize a completion structure
+ * @work: identifier for the completion structure
+ *
+ * This macro declares and initializes a completion structure. Generally used
+ * for static declarations. You should use the _ONSTACK variant for automatic
+ * variables.
+ */
 #define DECLARE_COMPLETION(work) \
 	struct completion work = COMPLETION_INITIALIZER(work)
 
@@ -29,6 +49,13 @@ struct completion {
  * completions - so we use the _ONSTACK() variant for those that
  * are on the kernel stack:
  */
+/**
+ * DECLARE_COMPLETION_ONSTACK: - declare and initialize a completion structure
+ * @work: identifier for the completion structure
+ *
+ * This macro declares and initializes a completion structure on the kernel
+ * stack.
+ */
 #ifdef CONFIG_LOCKDEP
 # define DECLARE_COMPLETION_ONSTACK(work) \
 	struct completion work = COMPLETION_INITIALIZER_ONSTACK(work)
@@ -36,6 +63,13 @@ struct completion {
 # define DECLARE_COMPLETION_ONSTACK(work) DECLARE_COMPLETION(work)
 #endif
 
+/**
+ * init_completion: - Initialize a dynamically allocated completion
+ * @x: completion structure that is to be initialized
+ *
+ * This inline function will initialize a dynamically created completion
+ * structure.
+ */
 static inline void init_completion(struct completion *x)
 {
 	x->done = 0;
@@ -55,6 +89,13 @@ extern bool completion_done(struct completion *x);
 extern void complete(struct completion *);
 extern void complete_all(struct completion *);
 
+/**
+ * INIT_COMPLETION: - reinitialize a completion structure
+ * @x: completion structure to be reinitialized
+ *
+ * This macro should be used to reinitialize a completion structure so it can
+ * be reused. This is especially important after complete_all() is used.
+ */
 #define INIT_COMPLETION(x)	((x).done = 0)
 
 
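For reference, a minimal sketch of how the interfaces documented above fit together. The kthread, the msleep() delay, and every identifier prefixed with example_ are illustrative, not part of this patch:

#include <linux/completion.h>
#include <linux/kthread.h>
#include <linux/delay.h>
#include <linux/module.h>

static struct completion example_done;

static int example_worker(void *unused)
{
	msleep(100);			/* stand-in for real work */
	complete(&example_done);	/* wake one waiting thread */
	return 0;
}

static int __init example_init(void)
{
	init_completion(&example_done);		/* dynamic initialization */
	kthread_run(example_worker, NULL, "example-worker");
	wait_for_completion(&example_done);	/* block until complete() runs */

	/* Reinitialize before reusing the structure, per the INIT_COMPLETION() note. */
	INIT_COMPLETION(example_done);
	return 0;
}
module_init(example_init);

A static completion would instead use DECLARE_COMPLETION(); DECLARE_COMPLETION_ONSTACK() is the variant for automatic variables, as the new comments note.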
diff --git a/include/linux/cpu.h b/include/linux/cpu.h
index d7faf8808497..c2747ac2ae43 100644
--- a/include/linux/cpu.h
+++ b/include/linux/cpu.h
@@ -69,6 +69,7 @@ static inline void unregister_cpu_notifier(struct notifier_block *nb)
 #endif
 
 int cpu_up(unsigned int cpu);
+void notify_cpu_starting(unsigned int cpu);
 extern void cpu_hotplug_init(void);
 extern void cpu_maps_update_begin(void);
 extern void cpu_maps_update_done(void);
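A hedged sketch of where the new hook is meant to sit in an architecture's secondary-CPU bringup path; the function name and surrounding steps are illustrative only:

/* Illustrative placement; real architecture code differs in the details. */
void __cpuinit example_start_secondary(unsigned int cpu)
{
	/* ... per-CPU initialization, interrupts still disabled ... */
	notify_cpu_starting(cpu);	/* run the CPU_STARTING notifier chain */
	/* ... mark the CPU online, then enable interrupts ... */
}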
diff --git a/include/linux/notifier.h b/include/linux/notifier.h
index da2698b0fdd1..b86fa2ffca0c 100644
--- a/include/linux/notifier.h
+++ b/include/linux/notifier.h
@@ -213,9 +213,16 @@ static inline int notifier_to_errno(int ret)
 #define CPU_DOWN_FAILED		0x0006 /* CPU (unsigned)v NOT going down */
 #define CPU_DEAD		0x0007 /* CPU (unsigned)v dead */
 #define CPU_DYING		0x0008 /* CPU (unsigned)v not running any task,
-					* not handling interrupts, soon dead */
+					* not handling interrupts, soon dead.
+					* Called on the dying cpu, interrupts
+					* are already disabled. Must not
+					* sleep, must not fail */
 #define CPU_POST_DEAD		0x0009 /* CPU (unsigned)v dead, cpu_hotplug
 					* lock is dropped */
+#define CPU_STARTING		0x000A /* CPU (unsigned)v soon running.
+					* Called on the new cpu, just before
+					* enabling interrupts. Must not sleep,
+					* must not fail */
 
 /* Used for CPU hotplug events occuring while tasks are frozen due to a suspend
  * operation in progress
@@ -229,6 +236,7 @@ static inline int notifier_to_errno(int ret)
 #define CPU_DOWN_FAILED_FROZEN	(CPU_DOWN_FAILED | CPU_TASKS_FROZEN)
 #define CPU_DEAD_FROZEN		(CPU_DEAD | CPU_TASKS_FROZEN)
 #define CPU_DYING_FROZEN	(CPU_DYING | CPU_TASKS_FROZEN)
+#define CPU_STARTING_FROZEN	(CPU_STARTING | CPU_TASKS_FROZEN)
 
 /* Hibernation and suspend events */
 #define PM_HIBERNATION_PREPARE	0x0001 /* Going to hibernate */
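To show how the new event is consumed, a minimal notifier sketch assuming the existing register_cpu_notifier() interface; the callback body and the example_ names are illustrative:

#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/kernel.h>

static int example_cpu_callback(struct notifier_block *nb,
				unsigned long action, void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;

	switch (action) {
	case CPU_STARTING:
	case CPU_STARTING_FROZEN:
		/* Runs on the incoming CPU with interrupts still disabled:
		 * must not sleep, must not fail. */
		printk(KERN_INFO "cpu %u is about to start running\n", cpu);
		break;
	case CPU_DYING:
	case CPU_DYING_FROZEN:
		/* Same atomic constraints, on the outgoing CPU. */
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block example_cpu_nb = {
	.notifier_call = example_cpu_callback,
};

/* Registered once, e.g. from an __init function:
 *	register_cpu_notifier(&example_cpu_nb);
 */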
diff --git a/include/linux/proportions.h b/include/linux/proportions.h
index 5afc1b23346d..cf793bbbd05e 100644
--- a/include/linux/proportions.h
+++ b/include/linux/proportions.h
@@ -104,8 +104,8 @@ struct prop_local_single {
 	 * snapshot of the last seen global state
 	 * and a lock protecting this state
 	 */
-	int shift;
 	unsigned long period;
+	int shift;
 	spinlock_t lock;		/* protect the snapshot state */
 };
 
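The log does not spell out why period and shift trade places; presumably the swap removes the alignment hole that a 4-byte int leaves in front of an 8-byte unsigned long on 64-bit targets, letting shift pack against the int-sized lock word that follows. A standalone illustration of that effect, with made-up field names:

#include <stdio.h>

struct old_order {
	int shift;		/* 4 bytes, then 4 bytes of padding ...    */
	unsigned long period;	/* ... because this wants 8-byte alignment */
	int lock_word;		/* stands in for a non-debug spinlock_t    */
};

struct new_order {
	unsigned long period;	/* 8 bytes                              */
	int shift;		/* shares an 8-byte slot with lock_word */
	int lock_word;
};

int main(void)
{
	/* On a typical LP64 target this prints old=24 new=16. */
	printf("old=%zu new=%zu\n",
	       sizeof(struct old_order), sizeof(struct new_order));
	return 0;
}

The field swaps in include/linux/sched.h below look like the same kind of repacking.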
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 3d9120c5ad15..5d0819ee442a 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -451,8 +451,8 @@ struct signal_struct {
 	 * - everyone except group_exit_task is stopped during signal delivery
 	 *   of fatal signals, group_exit_task processes the signal.
 	 */
-	struct task_struct	*group_exit_task;
 	int			notify_count;
+	struct task_struct	*group_exit_task;
 
 	/* thread group stop support, overloads group_exit_code too */
 	int			group_stop_count;
@@ -824,6 +824,9 @@ struct sched_domain {
 	unsigned int ttwu_move_affine;
 	unsigned int ttwu_move_balance;
 #endif
+#ifdef CONFIG_SCHED_DEBUG
+	char *name;
+#endif
 };
 
 extern void partition_sched_domains(int ndoms_new, cpumask_t *doms_new,
@@ -897,7 +900,7 @@ struct sched_class {
 	void (*yield_task) (struct rq *rq);
 	int  (*select_task_rq)(struct task_struct *p, int sync);
 
-	void (*check_preempt_curr) (struct rq *rq, struct task_struct *p);
+	void (*check_preempt_curr) (struct rq *rq, struct task_struct *p, int sync);
 
 	struct task_struct * (*pick_next_task) (struct rq *rq);
 	void (*put_prev_task) (struct rq *rq, struct task_struct *p);
@@ -1010,8 +1013,8 @@ struct sched_entity {
 
 struct sched_rt_entity {
 	struct list_head run_list;
-	unsigned int time_slice;
 	unsigned long timeout;
+	unsigned int time_slice;
 	int nr_cpus_allowed;
 
 	struct sched_rt_entity *back;