author     Ingo Molnar <mingo@elte.hu>    2008-06-23 05:11:42 -0400
committer  Ingo Molnar <mingo@elte.hu>    2008-06-23 05:11:42 -0400
commit     f34bfb1beef8a17ba3d46b60f8fa19ffedc1ed8d (patch)
tree       0816c565d26ae24854616faa0d497be365c2fe8b /kernel
parent     ee4311adf105f4d740f52e3948acc1d81598afcc (diff)
parent     481c5346d0981940ee63037eb53e4e37b0735c10 (diff)
Merge branch 'linus' into tracing/ftrace
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/cpuset.c       | 10
-rw-r--r--  kernel/rcupreempt.c   |  2
-rw-r--r--  kernel/sched.c        | 34
-rw-r--r--  kernel/sched_rt.c     | 63
-rw-r--r--  kernel/sched_stats.h  |  6
-rw-r--r--  kernel/softlockup.c   | 15
6 files changed, 91 insertions(+), 39 deletions(-)
diff --git a/kernel/cpuset.c b/kernel/cpuset.c
index 039baa4cd90c..9fceb97e989c 100644
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@ -1037,8 +1037,8 @@ int current_cpuset_is_being_rebound(void)
 
 static int update_relax_domain_level(struct cpuset *cs, s64 val)
 {
-	if ((int)val < 0)
-		val = -1;
+	if (val < -1 || val >= SD_LV_MAX)
+		return -EINVAL;
 
 	if (val != cs->relax_domain_level) {
 		cs->relax_domain_level = val;
@@ -1890,6 +1890,12 @@ static void common_cpu_mem_hotplug_unplug(void)
 	top_cpuset.mems_allowed = node_states[N_HIGH_MEMORY];
 	scan_for_empty_cpusets(&top_cpuset);
 
+	/*
+	 * Scheduler destroys domains on hotplug events.
+	 * Rebuild them based on the current settings.
+	 */
+	rebuild_sched_domains();
+
 	cgroup_unlock();
 }
 
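
Note: the cpuset change above makes update_relax_domain_level() reject out-of-range requests with -EINVAL instead of silently clamping negative values to -1. Below is a minimal userspace sketch of that validation logic only; the EINVAL and SD_LV_MAX values are stand-ins (the kernel takes both from its own headers), and the cpuset structure is reduced to a single variable.

#include <stdio.h>

#define EINVAL    22   /* stand-in for <errno.h> value */
#define SD_LV_MAX  9   /* illustrative; the real bound comes from enum sched_domain_level */

static long long relax_domain_level = -1;

static int update_relax_domain_level(long long val)
{
	/* Out-of-range requests are now rejected instead of clamped to -1. */
	if (val < -1 || val >= SD_LV_MAX)
		return -EINVAL;

	if (val != relax_domain_level)
		relax_domain_level = val;
	return 0;
}

int main(void)
{
	printf("%d\n", update_relax_domain_level(2));   /* 0: accepted */
	printf("%d\n", update_relax_domain_level(-5));  /* -22: rejected, previously clamped */
	return 0;
}
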
diff --git a/kernel/rcupreempt.c b/kernel/rcupreempt.c
index e1cdf196a515..5e02b7740702 100644
--- a/kernel/rcupreempt.c
+++ b/kernel/rcupreempt.c
@@ -217,8 +217,6 @@ long rcu_batches_completed(void)
 }
 EXPORT_SYMBOL_GPL(rcu_batches_completed);
 
-EXPORT_SYMBOL_GPL(rcu_batches_completed_bh);
-
 void __rcu_read_lock(void)
 {
 	int idx;
diff --git a/kernel/sched.c b/kernel/sched.c
index 96fbbbf3722c..27eaa47d438a 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -1146,6 +1146,7 @@ static enum hrtimer_restart hrtick(struct hrtimer *timer)
 	return HRTIMER_NORESTART;
 }
 
+#ifdef CONFIG_SMP
 static void hotplug_hrtick_disable(int cpu)
 {
 	struct rq *rq = cpu_rq(cpu);
@@ -1201,6 +1202,7 @@ static void init_hrtick(void)
 {
 	hotcpu_notifier(hotplug_hrtick, 0);
 }
+#endif /* CONFIG_SMP */
 
 static void init_rq_hrtick(struct rq *rq)
 {
@@ -6928,7 +6930,12 @@ static int default_relax_domain_level = -1;
 
 static int __init setup_relax_domain_level(char *str)
 {
-	default_relax_domain_level = simple_strtoul(str, NULL, 0);
+	unsigned long val;
+
+	val = simple_strtoul(str, NULL, 0);
+	if (val < SD_LV_MAX)
+		default_relax_domain_level = val;
+
 	return 1;
 }
 __setup("relax_domain_level=", setup_relax_domain_level);
@@ -7287,6 +7294,18 @@ void __attribute__((weak)) arch_update_cpu_topology(void)
 }
 
 /*
+ * Free current domain masks.
+ * Called after all cpus are attached to NULL domain.
+ */
+static void free_sched_domains(void)
+{
+	ndoms_cur = 0;
+	if (doms_cur != &fallback_doms)
+		kfree(doms_cur);
+	doms_cur = &fallback_doms;
+}
+
+/*
  * Set up scheduler domains and groups. Callers must hold the hotplug lock.
  * For now this just excludes isolated cpus, but could be used to
  * exclude other special cases in the future.
@@ -7433,6 +7452,7 @@ int arch_reinit_sched_domains(void)
 	get_online_cpus();
 	mutex_lock(&sched_domains_mutex);
 	detach_destroy_domains(&cpu_online_map);
+	free_sched_domains();
 	err = arch_init_sched_domains(&cpu_online_map);
 	mutex_unlock(&sched_domains_mutex);
 	put_online_cpus();
@@ -7518,6 +7538,7 @@ static int update_sched_domains(struct notifier_block *nfb,
 	case CPU_DOWN_PREPARE:
 	case CPU_DOWN_PREPARE_FROZEN:
 		detach_destroy_domains(&cpu_online_map);
+		free_sched_domains();
 		return NOTIFY_OK;
 
 	case CPU_UP_CANCELED:
@@ -7536,8 +7557,16 @@ static int update_sched_domains(struct notifier_block *nfb,
 		return NOTIFY_DONE;
 	}
 
+#ifndef CONFIG_CPUSETS
+	/*
+	 * Create default domain partitioning if cpusets are disabled.
+	 * Otherwise we let cpusets rebuild the domains based on the
+	 * current setup.
+	 */
+
 	/* The hotplug lock is already held by cpu_up/cpu_down */
 	arch_init_sched_domains(&cpu_online_map);
+#endif
 
 	return NOTIFY_OK;
 }
@@ -7677,7 +7706,6 @@ static void init_tg_rt_entry(struct task_group *tg, struct rt_rq *rt_rq,
 	else
 		rt_se->rt_rq = parent->my_q;
 
-	rt_se->rt_rq = &rq->rt;
 	rt_se->my_q = rt_rq;
 	rt_se->parent = parent;
 	INIT_LIST_HEAD(&rt_se->run_list);
@@ -8399,7 +8427,7 @@ static unsigned long to_ratio(u64 period, u64 runtime)
 #ifdef CONFIG_CGROUP_SCHED
 static int __rt_schedulable(struct task_group *tg, u64 period, u64 runtime)
 {
-	struct task_group *tgi, *parent = tg->parent;
+	struct task_group *tgi, *parent = tg ? tg->parent : NULL;
 	unsigned long total = 0;
 
 	if (!parent) {
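
Note: the new free_sched_domains() helper in the sched.c diff above resets the current-domains pointer to the static fallback mask, so repeated hotplug teardowns never free the fallback or free allocated storage twice. Below is a simplified userspace sketch of that ownership pattern; cpumask_t, doms_cur, fallback_doms and build_domains() are reduced stand-ins, not the kernel definitions.

#include <stdlib.h>

typedef unsigned long cpumask_t;            /* stand-in for the real cpumask_t */

static cpumask_t fallback_doms;             /* static fallback, never freed */
static cpumask_t *doms_cur = &fallback_doms;
static int ndoms_cur;

static void free_sched_domains(void)
{
	ndoms_cur = 0;
	if (doms_cur != &fallback_doms)
		free(doms_cur);              /* kfree() in the kernel */
	doms_cur = &fallback_doms;           /* always land back on the fallback */
}

static void build_domains(int ndoms)
{
	doms_cur = calloc(ndoms, sizeof(*doms_cur));
	if (!doms_cur) {
		doms_cur = &fallback_doms;   /* fall back rather than fail */
		ndoms = 1;
	}
	ndoms_cur = ndoms;
}

int main(void)
{
	build_domains(4);
	free_sched_domains();                /* frees the allocation */
	free_sched_domains();                /* idempotent: fallback is never freed */
	return 0;
}
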
diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c
index 3432d573205d..1dad5bbb59b6 100644
--- a/kernel/sched_rt.c
+++ b/kernel/sched_rt.c
@@ -449,13 +449,19 @@ void dec_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
 #endif
 }
 
-static void enqueue_rt_entity(struct sched_rt_entity *rt_se)
+static void __enqueue_rt_entity(struct sched_rt_entity *rt_se)
 {
 	struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
 	struct rt_prio_array *array = &rt_rq->active;
 	struct rt_rq *group_rq = group_rt_rq(rt_se);
 
-	if (group_rq && rt_rq_throttled(group_rq))
+	/*
+	 * Don't enqueue the group if its throttled, or when empty.
+	 * The latter is a consequence of the former when a child group
+	 * get throttled and the current group doesn't have any other
+	 * active members.
+	 */
+	if (group_rq && (rt_rq_throttled(group_rq) || !group_rq->rt_nr_running))
 		return;
 
 	list_add_tail(&rt_se->run_list, array->queue + rt_se_prio(rt_se));
@@ -464,7 +470,7 @@ static void enqueue_rt_entity(struct sched_rt_entity *rt_se)
 	inc_rt_tasks(rt_se, rt_rq);
 }
 
-static void dequeue_rt_entity(struct sched_rt_entity *rt_se)
+static void __dequeue_rt_entity(struct sched_rt_entity *rt_se)
 {
 	struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
 	struct rt_prio_array *array = &rt_rq->active;
@@ -480,11 +486,10 @@ static void dequeue_rt_entity(struct sched_rt_entity *rt_se)
  * Because the prio of an upper entry depends on the lower
  * entries, we must remove entries top - down.
  */
-static void dequeue_rt_stack(struct task_struct *p)
+static void dequeue_rt_stack(struct sched_rt_entity *rt_se)
 {
-	struct sched_rt_entity *rt_se, *back = NULL;
+	struct sched_rt_entity *back = NULL;
 
-	rt_se = &p->rt;
 	for_each_sched_rt_entity(rt_se) {
 		rt_se->back = back;
 		back = rt_se;
@@ -492,7 +497,26 @@ static void dequeue_rt_stack(struct task_struct *p)
 
 	for (rt_se = back; rt_se; rt_se = rt_se->back) {
 		if (on_rt_rq(rt_se))
-			dequeue_rt_entity(rt_se);
+			__dequeue_rt_entity(rt_se);
+	}
+}
+
+static void enqueue_rt_entity(struct sched_rt_entity *rt_se)
+{
+	dequeue_rt_stack(rt_se);
+	for_each_sched_rt_entity(rt_se)
+		__enqueue_rt_entity(rt_se);
+}
+
+static void dequeue_rt_entity(struct sched_rt_entity *rt_se)
+{
+	dequeue_rt_stack(rt_se);
+
+	for_each_sched_rt_entity(rt_se) {
+		struct rt_rq *rt_rq = group_rt_rq(rt_se);
+
+		if (rt_rq && rt_rq->rt_nr_running)
+			__enqueue_rt_entity(rt_se);
 	}
 }
 
@@ -506,32 +530,15 @@ static void enqueue_task_rt(struct rq *rq, struct task_struct *p, int wakeup)
 	if (wakeup)
 		rt_se->timeout = 0;
 
-	dequeue_rt_stack(p);
-
-	/*
-	 * enqueue everybody, bottom - up.
-	 */
-	for_each_sched_rt_entity(rt_se)
-		enqueue_rt_entity(rt_se);
+	enqueue_rt_entity(rt_se);
 }
 
 static void dequeue_task_rt(struct rq *rq, struct task_struct *p, int sleep)
 {
 	struct sched_rt_entity *rt_se = &p->rt;
-	struct rt_rq *rt_rq;
 
 	update_curr_rt(rq);
-
-	dequeue_rt_stack(p);
-
-	/*
-	 * re-enqueue all non-empty rt_rq entities.
-	 */
-	for_each_sched_rt_entity(rt_se) {
-		rt_rq = group_rt_rq(rt_se);
-		if (rt_rq && rt_rq->rt_nr_running)
-			enqueue_rt_entity(rt_se);
-	}
+	dequeue_rt_entity(rt_se);
 }
 
 /*
@@ -542,8 +549,10 @@ static
 void requeue_rt_entity(struct rt_rq *rt_rq, struct sched_rt_entity *rt_se)
 {
 	struct rt_prio_array *array = &rt_rq->active;
+	struct list_head *queue = array->queue + rt_se_prio(rt_se);
 
-	list_move_tail(&rt_se->run_list, array->queue + rt_se_prio(rt_se));
+	if (on_rt_rq(rt_se))
+		list_move_tail(&rt_se->run_list, queue);
 }
 
 static void requeue_task_rt(struct rq *rq, struct task_struct *p)
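
Note: the sched_rt.c rework above splits enqueue/dequeue into raw __enqueue_rt_entity()/__dequeue_rt_entity() helpers plus wrappers that first dequeue the whole parent chain top-down and then re-add it bottom-up, skipping empty group queues. Below is a heavily simplified userspace sketch of that two-pass pattern; the rt_entity structure, the for_each_rt_entity macro and the helper names are illustrative stand-ins, not the kernel types.

#include <stdbool.h>
#include <stdio.h>

struct rt_entity {
	struct rt_entity *parent;   /* NULL at the root */
	struct rt_entity *back;     /* scratch link used for the top-down pass */
	bool on_rq;
	int nr_running;             /* runnable entities in this group's queue */
};

#define for_each_rt_entity(se) for (; (se); (se) = (se)->parent)

static void __enqueue(struct rt_entity *se) { se->on_rq = true; }
static void __dequeue(struct rt_entity *se) { se->on_rq = false; }

/* Remove the whole chain top-down, as dequeue_rt_stack() does. */
static void dequeue_stack(struct rt_entity *se)
{
	struct rt_entity *back = NULL;

	for_each_rt_entity(se) {
		se->back = back;
		back = se;
	}
	for (se = back; se; se = se->back)
		if (se->on_rq)
			__dequeue(se);
}

static void enqueue_entity(struct rt_entity *se)
{
	dequeue_stack(se);
	for_each_rt_entity(se)
		__enqueue(se);              /* re-add everything bottom-up */
}

static void dequeue_entity(struct rt_entity *se)
{
	dequeue_stack(se);
	for_each_rt_entity(se)
		if (se->nr_running)         /* only non-empty groups come back */
			__enqueue(se);
}

int main(void)
{
	struct rt_entity root = { 0 };
	struct rt_entity group = { .parent = &root };
	struct rt_entity task = { .parent = &group };

	enqueue_entity(&task);              /* task, group and root go on the queues */
	dequeue_entity(&task);              /* empty groups stay dequeued */
	printf("group on_rq=%d root on_rq=%d\n", group.on_rq, root.on_rq);
	return 0;
}
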
diff --git a/kernel/sched_stats.h b/kernel/sched_stats.h
index a38878e0e49d..80179ef7450e 100644
--- a/kernel/sched_stats.h
+++ b/kernel/sched_stats.h
@@ -198,6 +198,9 @@ static inline void sched_info_queued(struct task_struct *t)
 /*
  * Called when a process ceases being the active-running process, either
  * voluntarily or involuntarily. Now we can calculate how long we ran.
+ * Also, if the process is still in the TASK_RUNNING state, call
+ * sched_info_queued() to mark that it has now again started waiting on
+ * the runqueue.
  */
 static inline void sched_info_depart(struct task_struct *t)
 {
@@ -206,6 +209,9 @@ static inline void sched_info_depart(struct task_struct *t)
 
 	t->sched_info.cpu_time += delta;
 	rq_sched_info_depart(task_rq(t), delta);
+
+	if (t->state == TASK_RUNNING)
+		sched_info_queued(t);
 }
 
 /*
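
Note: with the sched_stats.h change above, a task that is still TASK_RUNNING when it stops running (i.e. was preempted rather than put to sleep) is immediately re-marked as queued, so its wait-time accounting keeps going. A tiny userspace sketch of that hand-off; the task fields, state values and clock here are stand-ins.

#include <stdio.h>

enum task_state { TASK_RUNNING, TASK_SLEEPING };

struct task {
	enum task_state state;
	unsigned long long last_queued;     /* when it last started waiting */
	unsigned long long cpu_time;
};

static unsigned long long now;              /* stand-in clock */

static void sched_info_queued(struct task *t)
{
	t->last_queued = now;
}

static void sched_info_depart(struct task *t, unsigned long long ran)
{
	t->cpu_time += ran;
	if (t->state == TASK_RUNNING)       /* preempted, not sleeping: back to waiting */
		sched_info_queued(t);
}

int main(void)
{
	struct task t = { .state = TASK_RUNNING };

	now = 100;
	sched_info_depart(&t, 10);
	printf("queued at %llu, cpu_time %llu\n", t.last_queued, t.cpu_time);
	return 0;
}
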
diff --git a/kernel/softlockup.c b/kernel/softlockup.c
index 01b6522fd92b..c828c2339cc9 100644
--- a/kernel/softlockup.c
+++ b/kernel/softlockup.c
@@ -49,12 +49,17 @@ static unsigned long get_timestamp(int this_cpu)
 	return cpu_clock(this_cpu) >> 30LL; /* 2^30 ~= 10^9 */
 }
 
-void touch_softlockup_watchdog(void)
+static void __touch_softlockup_watchdog(void)
 {
 	int this_cpu = raw_smp_processor_id();
 
 	__raw_get_cpu_var(touch_timestamp) = get_timestamp(this_cpu);
 }
+
+void touch_softlockup_watchdog(void)
+{
+	__raw_get_cpu_var(touch_timestamp) = 0;
+}
 EXPORT_SYMBOL(touch_softlockup_watchdog);
 
 void touch_all_softlockup_watchdogs(void)
@@ -80,7 +85,7 @@ void softlockup_tick(void)
 	unsigned long now;
 
 	if (touch_timestamp == 0) {
-		touch_softlockup_watchdog();
+		__touch_softlockup_watchdog();
 		return;
 	}
 
@@ -95,7 +100,7 @@ void softlockup_tick(void)
 
 	/* do not print during early bootup: */
 	if (unlikely(system_state != SYSTEM_RUNNING)) {
-		touch_softlockup_watchdog();
+		__touch_softlockup_watchdog();
 		return;
 	}
 
@@ -214,7 +219,7 @@ static int watchdog(void *__bind_cpu)
 	sched_setscheduler(current, SCHED_FIFO, &param);
 
 	/* initialize timestamp */
-	touch_softlockup_watchdog();
+	__touch_softlockup_watchdog();
 
 	set_current_state(TASK_INTERRUPTIBLE);
 	/*
@@ -223,7 +228,7 @@ static int watchdog(void *__bind_cpu)
 	 * debug-printout triggers in softlockup_tick().
 	 */
 	while (!kthread_should_stop()) {
-		touch_softlockup_watchdog();
+		__touch_softlockup_watchdog();
 		schedule();
 
 		if (kthread_should_stop())
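
Note: the softlockup.c diff above splits the touch path: the exported touch_softlockup_watchdog() now only zeroes the per-CPU timestamp, and the next softlockup_tick() notices the zero and re-arms by calling the internal __touch_softlockup_watchdog(), which actually reads the clock. Below is a single-CPU userspace sketch of that protocol; the fake clock, the 60-unit threshold and the collapsed per-CPU state are stand-ins.

#include <stdio.h>

static unsigned long touch_timestamp;       /* per-CPU in the kernel; one CPU here */
static unsigned long fake_clock;            /* stand-in for cpu_clock() >> 30 */

static unsigned long get_timestamp(void)
{
	return fake_clock;
}

static void __touch_softlockup_watchdog(void)
{
	touch_timestamp = get_timestamp();   /* real timestamp, taken in watchdog context */
}

static void touch_softlockup_watchdog(void)
{
	touch_timestamp = 0;                 /* cheap: defer the clock read to the tick */
}

static void softlockup_tick(void)
{
	if (touch_timestamp == 0) {          /* someone touched us: re-arm and bail */
		__touch_softlockup_watchdog();
		return;
	}
	if (get_timestamp() - touch_timestamp > 60)
		printf("BUG: soft lockup detected\n");
}

int main(void)
{
	fake_clock = 5;
	__touch_softlockup_watchdog();
	fake_clock = 100;
	softlockup_tick();                   /* warns: 95 units without a touch */
	touch_softlockup_watchdog();
	softlockup_tick();                   /* touched: re-arms instead of warning */
	return 0;
}
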