diff options
| author | Linus Torvalds <torvalds@linux-foundation.org> | 2009-06-10 18:32:59 -0400 |
|---|---|---|
| committer | Linus Torvalds <torvalds@linux-foundation.org> | 2009-06-10 18:32:59 -0400 |
| commit | 99e97b860e14c64760855198e91d1166697131a7 (patch) | |
| tree | fadc8368c3f784bff92fba82d983e7861559cf9d /include | |
| parent | 82782ca77d1bfb32b0334cce40a25b91bd8ec016 (diff) | |
| parent | f04d82b7e0c63d0251f9952a537a4bc4d73aa1a9 (diff) | |
Merge branch 'sched-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip
* 'sched-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
sched: fix typo in sched-rt-group.txt file
ftrace: fix typo about map of kernel priority in ftrace.txt file.
sched: properly define the sched_group::cpumask and sched_domain::span fields
sched, timers: cleanup avenrun users
sched, timers: move calc_load() to scheduler
sched: Don't export sched_mc_power_savings on multi-socket single core system
sched: emit thread info flags with stack trace
sched: rt: document the risk of small values in the bandwidth settings
sched: Replace first_cpu() with cpumask_first() in ILB nomination code
sched: remove extra call overhead for schedule()
sched: use group_first_cpu() instead of cpumask_first(sched_group_cpus())
wait: don't use __wake_up_common()
sched: Nominate a power-efficient ilb in select_nohz_balancer()
sched: Nominate idle load balancer from a semi-idle package.
sched: remove redundant hierarchy walk in check_preempt_wakeup
Diffstat (limited to 'include')
| -rw-r--r-- | include/linux/sched.h | 28 | ||||
| -rw-r--r-- | include/linux/wait.h | 2 |
2 files changed, 24 insertions, 6 deletions
diff --git a/include/linux/sched.h b/include/linux/sched.h index b4c38bc8049c..dbb1043e8656 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h | |||
| @@ -116,6 +116,7 @@ struct fs_struct; | |||
| 116 | * 11 bit fractions. | 116 | * 11 bit fractions. |
| 117 | */ | 117 | */ |
| 118 | extern unsigned long avenrun[]; /* Load averages */ | 118 | extern unsigned long avenrun[]; /* Load averages */ |
| 119 | extern void get_avenrun(unsigned long *loads, unsigned long offset, int shift); | ||
| 119 | 120 | ||
| 120 | #define FSHIFT 11 /* nr of bits of precision */ | 121 | #define FSHIFT 11 /* nr of bits of precision */ |
| 121 | #define FIXED_1 (1<<FSHIFT) /* 1.0 as fixed-point */ | 122 | #define FIXED_1 (1<<FSHIFT) /* 1.0 as fixed-point */ |
| @@ -135,8 +136,8 @@ DECLARE_PER_CPU(unsigned long, process_counts); | |||
| 135 | extern int nr_processes(void); | 136 | extern int nr_processes(void); |
| 136 | extern unsigned long nr_running(void); | 137 | extern unsigned long nr_running(void); |
| 137 | extern unsigned long nr_uninterruptible(void); | 138 | extern unsigned long nr_uninterruptible(void); |
| 138 | extern unsigned long nr_active(void); | ||
| 139 | extern unsigned long nr_iowait(void); | 139 | extern unsigned long nr_iowait(void); |
| 140 | extern void calc_global_load(void); | ||
| 140 | 141 | ||
| 141 | extern unsigned long get_parent_ip(unsigned long addr); | 142 | extern unsigned long get_parent_ip(unsigned long addr); |
| 142 | 143 | ||
| @@ -838,7 +839,17 @@ struct sched_group { | |||
| 838 | */ | 839 | */ |
| 839 | u32 reciprocal_cpu_power; | 840 | u32 reciprocal_cpu_power; |
| 840 | 841 | ||
| 841 | unsigned long cpumask[]; | 842 | /* |
| 843 | * The CPUs this group covers. | ||
| 844 | * | ||
| 845 | * NOTE: this field is variable length. (Allocated dynamically | ||
| 846 | * by attaching extra space to the end of the structure, | ||
| 847 | * depending on how many CPUs the kernel has booted up with) | ||
| 848 | * | ||
| 849 | * It can also be embedded into static data structures at build | ||
| 850 | * time. (See 'struct static_sched_group' in kernel/sched.c) | ||
| 851 | */ | ||
| 852 | unsigned long cpumask[0]; | ||
| 842 | }; | 853 | }; |
| 843 | 854 | ||
| 844 | static inline struct cpumask *sched_group_cpus(struct sched_group *sg) | 855 | static inline struct cpumask *sched_group_cpus(struct sched_group *sg) |
| @@ -924,8 +935,17 @@ struct sched_domain { | |||
| 924 | char *name; | 935 | char *name; |
| 925 | #endif | 936 | #endif |
| 926 | 937 | ||
| 927 | /* span of all CPUs in this domain */ | 938 | /* |
| 928 | unsigned long span[]; | 939 | * Span of all CPUs in this domain. |
| 940 | * | ||
| 941 | * NOTE: this field is variable length. (Allocated dynamically | ||
| 942 | * by attaching extra space to the end of the structure, | ||
| 943 | * depending on how many CPUs the kernel has booted up with) | ||
| 944 | * | ||
| 945 | * It can also be embedded into static data structures at build | ||
| 946 | * time. (See 'struct static_sched_domain' in kernel/sched.c) | ||
| 947 | */ | ||
| 948 | unsigned long span[0]; | ||
| 929 | }; | 949 | }; |
| 930 | 950 | ||
| 931 | static inline struct cpumask *sched_domain_span(struct sched_domain *sd) | 951 | static inline struct cpumask *sched_domain_span(struct sched_domain *sd) |
diff --git a/include/linux/wait.h b/include/linux/wait.h index bc024632f365..6788e1a4d4ca 100644 --- a/include/linux/wait.h +++ b/include/linux/wait.h | |||
| @@ -132,8 +132,6 @@ static inline void __remove_wait_queue(wait_queue_head_t *head, | |||
| 132 | list_del(&old->task_list); | 132 | list_del(&old->task_list); |
| 133 | } | 133 | } |
| 134 | 134 | ||
| 135 | void __wake_up_common(wait_queue_head_t *q, unsigned int mode, | ||
| 136 | int nr_exclusive, int sync, void *key); | ||
| 137 | void __wake_up(wait_queue_head_t *q, unsigned int mode, int nr, void *key); | 135 | void __wake_up(wait_queue_head_t *q, unsigned int mode, int nr, void *key); |
| 138 | void __wake_up_locked_key(wait_queue_head_t *q, unsigned int mode, void *key); | 136 | void __wake_up_locked_key(wait_queue_head_t *q, unsigned int mode, void *key); |
| 139 | void __wake_up_sync_key(wait_queue_head_t *q, unsigned int mode, int nr, | 137 | void __wake_up_sync_key(wait_queue_head_t *q, unsigned int mode, int nr, |
