| author | Linus Torvalds <torvalds@linux-foundation.org> | 2010-07-02 12:52:58 -0400 |
|---|---|---|
| committer | Linus Torvalds <torvalds@linux-foundation.org> | 2010-07-02 12:52:58 -0400 |
| commit | 123f94f22e3d283dfe68742b269c245b0501ad82 | |
| tree | 1d40043b0909f309cf77204ea87be9e61f143e79 | |
| parent | 4b78c119f0ba715b4e29b190bf4d7bce810ea0d6 | |
| parent | 8c215bd3890c347dfb6a2db4779755f8b9c298a9 | |
Merge branch 'sched-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip
* 'sched-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
sched: Cure nr_iowait_cpu() users
init: Fix comment
init, sched: Fix race between init and kthreadd
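
The first of these fixes changes the `nr_iowait_cpu()` interface so that callers name the CPU they are asking about instead of implicitly reading the current CPU's runqueue; the diffs below show every caller being updated to pass either `smp_processor_id()` or the `cpu` the statistics are for. As a rough user-space model of why that matters (plain C, hypothetical names, not kernel code):

```c
#include <stdio.h>

#define NR_CPUS 4

/* Stand-ins for the kernel's per-runqueue nr_iowait counters. */
static int nr_iowait_counter[NR_CPUS] = { 0, 3, 0, 1 };
static int current_cpu = 0;          /* plays the role of smp_processor_id() */

/* Old shape: always reads the counter of the CPU the caller runs on. */
static int nr_iowait_cpu_old(void)
{
	return nr_iowait_counter[current_cpu];
}

/* New shape: the caller states which CPU it is asking about. */
static int nr_iowait_cpu_new(int cpu)
{
	return nr_iowait_counter[cpu];
}

int main(void)
{
	/*
	 * A statistics reader running on CPU 0 but reporting on CPU 1,
	 * as a helper like get_cpu_iowait_time_us(cpu, ...) may do.
	 */
	printf("old interface: %d (CPU 0's count, wrong CPU)\n", nr_iowait_cpu_old());
	printf("new interface: %d (CPU 1's count)\n", nr_iowait_cpu_new(1));
	return 0;
}
```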
| mode | file | lines changed |
|---|---|---|
| -rw-r--r-- | drivers/cpuidle/governors/menu.c | 4 |
| -rw-r--r-- | include/linux/sched.h | 2 |
| -rw-r--r-- | init/main.c | 12 |
| -rw-r--r-- | kernel/sched.c | 4 |
| -rw-r--r-- | kernel/time/tick-sched.c | 16 |

5 files changed, 25 insertions, 13 deletions
```diff
diff --git a/drivers/cpuidle/governors/menu.c b/drivers/cpuidle/governors/menu.c
index 52ff8aa63f84..1b128702d300 100644
--- a/drivers/cpuidle/governors/menu.c
+++ b/drivers/cpuidle/governors/menu.c
@@ -143,7 +143,7 @@ static inline int which_bucket(unsigned int duration)
 	 * This allows us to calculate
 	 * E(duration)|iowait
 	 */
-	if (nr_iowait_cpu())
+	if (nr_iowait_cpu(smp_processor_id()))
 		bucket = BUCKETS/2;
 
 	if (duration < 10)
@@ -175,7 +175,7 @@ static inline int performance_multiplier(void)
 	mult += 2 * get_loadavg();
 
 	/* for IO wait tasks (per cpu!) we add 5x each */
-	mult += 10 * nr_iowait_cpu();
+	mult += 10 * nr_iowait_cpu(smp_processor_id());
 
 	return mult;
 }
```
```diff
diff --git a/include/linux/sched.h b/include/linux/sched.h
index f118809c953f..747fcaedddb7 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -139,7 +139,7 @@ extern int nr_processes(void);
 extern unsigned long nr_running(void);
 extern unsigned long nr_uninterruptible(void);
 extern unsigned long nr_iowait(void);
-extern unsigned long nr_iowait_cpu(void);
+extern unsigned long nr_iowait_cpu(int cpu);
 extern unsigned long this_cpu_load(void);
 
 
```
```diff
diff --git a/init/main.c b/init/main.c
index ac2e4a5f59ee..a42fdf4aeba9 100644
--- a/init/main.c
+++ b/init/main.c
@@ -424,18 +424,26 @@ static void __init setup_command_line(char *command_line)
  * gcc-3.4 accidentally inlines this function, so use noinline.
  */
 
+static __initdata DECLARE_COMPLETION(kthreadd_done);
+
 static noinline void __init_refok rest_init(void)
 	__releases(kernel_lock)
 {
 	int pid;
 
 	rcu_scheduler_starting();
+	/*
+	 * We need to spawn init first so that it obtains pid 1, however
+	 * the init task will end up wanting to create kthreads, which, if
+	 * we schedule it before we create kthreadd, will OOPS.
+	 */
 	kernel_thread(kernel_init, NULL, CLONE_FS | CLONE_SIGHAND);
 	numa_default_policy();
 	pid = kernel_thread(kthreadd, NULL, CLONE_FS | CLONE_FILES);
 	rcu_read_lock();
 	kthreadd_task = find_task_by_pid_ns(pid, &init_pid_ns);
 	rcu_read_unlock();
+	complete(&kthreadd_done);
 	unlock_kernel();
 
 	/*
@@ -857,6 +865,10 @@ static noinline int init_post(void)
 
 static int __init kernel_init(void * unused)
 {
+	/*
+	 * Wait until kthreadd is all set-up.
+	 */
+	wait_for_completion(&kthreadd_done);
 	lock_kernel();
 
 	/*
```
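
The comment added to `rest_init()` above states the ordering constraint: `init` must be spawned first so that it obtains pid 1, but it must not start creating kthreads until `kthreadd` exists, which is why `kernel_init()` now blocks on the `kthreadd_done` completion. A minimal user-space sketch of the same handshake, using pthreads and a condition variable in place of the kernel's completion API (all names here are made up for illustration):

```c
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  spawner_done = PTHREAD_COND_INITIALIZER;
static int spawner_ready;                  /* plays the role of kthreadd_done */

/* Plays the role of kernel_init(): created first, but must not proceed
 * until the thread spawner has been set up. */
static void *init_thread(void *unused)
{
	(void)unused;
	pthread_mutex_lock(&lock);
	while (!spawner_ready)             /* like wait_for_completion() */
		pthread_cond_wait(&spawner_done, &lock);
	pthread_mutex_unlock(&lock);
	puts("init: spawner is ready, continuing");
	return NULL;
}

/* Plays the role of kthreadd: the thread that services requests to
 * create further threads. */
static void *spawner_thread(void *unused)
{
	(void)unused;
	puts("spawner: up and running");
	return NULL;
}

int main(void)
{
	pthread_t init, spawner;

	/* Create "init" first (in the kernel this is what makes it pid 1). */
	pthread_create(&init, NULL, init_thread, NULL);

	/* Only after the spawner exists is the completion signalled,
	 * mirroring complete(&kthreadd_done) in rest_init(). */
	pthread_create(&spawner, NULL, spawner_thread, NULL);

	pthread_mutex_lock(&lock);
	spawner_ready = 1;
	pthread_cond_signal(&spawner_done);
	pthread_mutex_unlock(&lock);

	pthread_join(init, NULL);
	pthread_join(spawner, NULL);
	return 0;
}
```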
```diff
diff --git a/kernel/sched.c b/kernel/sched.c
index cb816e36cc8b..f52a8801b7a2 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -2873,9 +2873,9 @@ unsigned long nr_iowait(void)
 	return sum;
 }
 
-unsigned long nr_iowait_cpu(void)
+unsigned long nr_iowait_cpu(int cpu)
 {
-	struct rq *this = this_rq();
+	struct rq *this = cpu_rq(cpu);
 	return atomic_read(&this->nr_iowait);
 }
 
```
```diff
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index 783fbadf2202..813993b5fb61 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -154,14 +154,14 @@ static void tick_nohz_update_jiffies(ktime_t now)
  * Updates the per cpu time idle statistics counters
  */
 static void
-update_ts_time_stats(struct tick_sched *ts, ktime_t now, u64 *last_update_time)
+update_ts_time_stats(int cpu, struct tick_sched *ts, ktime_t now, u64 *last_update_time)
 {
 	ktime_t delta;
 
 	if (ts->idle_active) {
 		delta = ktime_sub(now, ts->idle_entrytime);
 		ts->idle_sleeptime = ktime_add(ts->idle_sleeptime, delta);
-		if (nr_iowait_cpu() > 0)
+		if (nr_iowait_cpu(cpu) > 0)
 			ts->iowait_sleeptime = ktime_add(ts->iowait_sleeptime, delta);
 		ts->idle_entrytime = now;
 	}
@@ -175,19 +175,19 @@ static void tick_nohz_stop_idle(int cpu, ktime_t now)
 {
 	struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
 
-	update_ts_time_stats(ts, now, NULL);
+	update_ts_time_stats(cpu, ts, now, NULL);
 	ts->idle_active = 0;
 
 	sched_clock_idle_wakeup_event(0);
 }
 
-static ktime_t tick_nohz_start_idle(struct tick_sched *ts)
+static ktime_t tick_nohz_start_idle(int cpu, struct tick_sched *ts)
 {
 	ktime_t now;
 
 	now = ktime_get();
 
-	update_ts_time_stats(ts, now, NULL);
+	update_ts_time_stats(cpu, ts, now, NULL);
 
 	ts->idle_entrytime = now;
 	ts->idle_active = 1;
@@ -216,7 +216,7 @@ u64 get_cpu_idle_time_us(int cpu, u64 *last_update_time)
 	if (!tick_nohz_enabled)
 		return -1;
 
-	update_ts_time_stats(ts, ktime_get(), last_update_time);
+	update_ts_time_stats(cpu, ts, ktime_get(), last_update_time);
 
 	return ktime_to_us(ts->idle_sleeptime);
 }
@@ -242,7 +242,7 @@ u64 get_cpu_iowait_time_us(int cpu, u64 *last_update_time)
 	if (!tick_nohz_enabled)
 		return -1;
 
-	update_ts_time_stats(ts, ktime_get(), last_update_time);
+	update_ts_time_stats(cpu, ts, ktime_get(), last_update_time);
 
 	return ktime_to_us(ts->iowait_sleeptime);
 }
@@ -284,7 +284,7 @@ void tick_nohz_stop_sched_tick(int inidle)
 	 */
 	ts->inidle = 1;
 
-	now = tick_nohz_start_idle(ts);
+	now = tick_nohz_start_idle(cpu, ts);
 
 	/*
 	 * If this cpu is offline and it is the one which updates
```
