author	Nick Piggin <nickpiggin@yahoo.com.au>	2005-06-25 17:57:27 -0400
committer	Linus Torvalds <torvalds@ppc970.osdl.org>	2005-06-25 19:24:44 -0400
commit	674311d5b411e9042df4fdf7aef0b3c8217b6240 (patch)
tree	4ee6b739629e7fc33b519fd087a116b4fa33a217 /kernel/sched.c
parent	3dbd5342074a1e570ec84edf859deb9be588006d (diff)
[PATCH] sched: RCU domains
One of the problems with the multilevel balance-on-fork/exec is that it has to
jump through hoops to satisfy the sched-domain locking semantics (that is, you
may traverse your own domain only when not preemptible, and you may traverse
another CPU's domains only while holding its runqueue lock).
Balance-on-exec potentially had to migrate across more than one CPU before
finding a final CPU to migrate to, and balance-on-fork potentially had to take
multiple runqueue locks.
So bite the bullet and make sched-domains go completely RCU. This actually
simplifies the code quite a bit.
From: Ingo Molnar <mingo@elte.hu>
schedstats RCU fix, and a nice comment on for_each_domain, from Ingo.
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Nick Piggin <nickpiggin@yahoo.com.au>
Acked-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
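
[Editor's note] The conversion boils down to the classic RCU update pattern: the
writer publishes a new domain tree with rcu_assign_pointer() and lets a grace
period elapse before tearing down the old one, while readers only keep
preemption disabled for the duration of a walk. A minimal sketch of the writer
side, assuming a hypothetical detach_domain() helper and using kfree() as a
stand-in for the real arch-level teardown (synchronize_kernel() is this era's
grace-period primitive; it was later split into synchronize_rcu() and
synchronize_sched()):

#include <linux/rcupdate.h>
#include <linux/slab.h>

/* Hypothetical sketch -- not part of the patch itself. */
static void detach_domain(runqueue_t *rq, struct sched_domain *new_sd)
{
	struct sched_domain *old_sd = rq->sd;

	rcu_assign_pointer(rq->sd, new_sd);	/* write barrier, then publish */
	synchronize_kernel();			/* every CPU passes a quiescent state */
	kfree(old_sd);				/* no preempt-disabled walker remains */
}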
Diffstat (limited to 'kernel/sched.c')
-rw-r--r--	kernel/sched.c	60
1 file changed, 15 insertions(+), 45 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index ef32389ee768..54ce787b6207 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -260,8 +260,15 @@ struct runqueue {
 
 static DEFINE_PER_CPU(struct runqueue, runqueues);
 
+/*
+ * The domain tree (rq->sd) is protected by RCU's quiescent state transition.
+ * See update_sched_domains: synchronize_kernel for details.
+ *
+ * The domain tree of any CPU may only be accessed from within
+ * preempt-disabled sections.
+ */
 #define for_each_domain(cpu, domain) \
-	for (domain = cpu_rq(cpu)->sd; domain; domain = domain->parent)
+	for (domain = rcu_dereference(cpu_rq(cpu)->sd); domain; domain = domain->parent)
 
 #define cpu_rq(cpu)		(&per_cpu(runqueues, (cpu)))
 #define this_rq()		(&__get_cpu_var(runqueues))
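
[Editor's note] With rcu_dereference() folded into the macro, every domain-tree
walk, local or remote, reduces to the same bracketing; the show_schedstat()
hunk below is the in-tree instance. A sketch of what a caller looks like (the
function and its purpose are invented for illustration):

/* Hypothetical example: count the levels of a CPU's domain tree. */
static int domain_levels(int cpu)
{
	struct sched_domain *sd;
	int levels = 0;

	preempt_disable();		/* RCU read side: blocks the grace period */
	for_each_domain(cpu, sd)	/* rcu_dereference() happens inside the macro */
		levels++;
	preempt_enable();		/* from here on, the tree may be freed */

	return levels;
}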
@@ -395,6 +402,7 @@ static int show_schedstat(struct seq_file *seq, void *v)
 
 #ifdef CONFIG_SMP
 	/* domain-specific stats */
+	preempt_disable();
 	for_each_domain(cpu, sd) {
 		enum idle_type itype;
 		char mask_str[NR_CPUS];
@@ -419,6 +427,7 @@ static int show_schedstat(struct seq_file *seq, void *v)
 		    sd->sbf_cnt, sd->sbf_balanced, sd->sbf_pushed,
 		    sd->ttwu_wake_remote, sd->ttwu_move_affine, sd->ttwu_move_balance);
 	}
+	preempt_enable();
 #endif
 	}
 	return 0;
@@ -824,22 +833,12 @@ inline int task_curr(const task_t *p)
 }
 
 #ifdef CONFIG_SMP
-enum request_type {
-	REQ_MOVE_TASK,
-	REQ_SET_DOMAIN,
-};
-
 typedef struct {
 	struct list_head list;
-	enum request_type type;
 
-	/* For REQ_MOVE_TASK */
 	task_t *task;
 	int dest_cpu;
 
-	/* For REQ_SET_DOMAIN */
-	struct sched_domain *sd;
-
 	struct completion done;
 } migration_req_t;
 
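
[Editor's note] With domain installation no longer funneled through the
migration thread, the REQ_MOVE_TASK/REQ_SET_DOMAIN dispatch is dead weight:
every queued request is now a task migration, so migration_req_t shrinks back
to a pure task-migration request and migration_thread() (below) loses its
type switch.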
@@ -861,7 +860,6 @@ static int migrate_task(task_t *p, int dest_cpu, migration_req_t *req)
 	}
 
 	init_completion(&req->done);
-	req->type = REQ_MOVE_TASK;
 	req->task = p;
 	req->dest_cpu = dest_cpu;
 	list_add(&req->list, &rq->migration_queue);
@@ -4378,17 +4376,9 @@ static int migration_thread(void * data)
 		req = list_entry(head->next, migration_req_t, list);
 		list_del_init(head->next);
 
-		if (req->type == REQ_MOVE_TASK) {
-			spin_unlock(&rq->lock);
-			__migrate_task(req->task, cpu, req->dest_cpu);
-			local_irq_enable();
-		} else if (req->type == REQ_SET_DOMAIN) {
-			rq->sd = req->sd;
-			spin_unlock_irq(&rq->lock);
-		} else {
-			spin_unlock_irq(&rq->lock);
-			WARN_ON(1);
-		}
+		spin_unlock(&rq->lock);
+		__migrate_task(req->task, cpu, req->dest_cpu);
+		local_irq_enable();
 
 		complete(&req->done);
 	}
@@ -4619,7 +4609,6 @@ static int migration_call(struct notifier_block *nfb, unsigned long action,
 			migration_req_t *req;
 			req = list_entry(rq->migration_queue.next,
 					 migration_req_t, list);
-			BUG_ON(req->type != REQ_MOVE_TASK);
 			list_del_init(&req->list);
 			complete(&req->done);
 		}
@@ -4800,10 +4789,7 @@ static int __devinit sd_parent_degenerate(struct sched_domain *sd,
  */
 void __devinit cpu_attach_domain(struct sched_domain *sd, int cpu)
 {
-	migration_req_t req;
-	unsigned long flags;
 	runqueue_t *rq = cpu_rq(cpu);
-	int local = 1;
 	struct sched_domain *tmp;
 
 	/* Remove the sched domains which do not contribute to scheduling. */
@@ -4820,24 +4806,7 @@ void __devinit cpu_attach_domain(struct sched_domain *sd, int cpu)
 
 	sched_domain_debug(sd, cpu);
 
-	spin_lock_irqsave(&rq->lock, flags);
-
-	if (cpu == smp_processor_id() || !cpu_online(cpu)) {
-		rq->sd = sd;
-	} else {
-		init_completion(&req.done);
-		req.type = REQ_SET_DOMAIN;
-		req.sd = sd;
-		list_add(&req.list, &rq->migration_queue);
-		local = 0;
-	}
-
-	spin_unlock_irqrestore(&rq->lock, flags);
-
-	if (!local) {
-		wake_up_process(rq->migration_thread);
-		wait_for_completion(&req.done);
-	}
+	rcu_assign_pointer(rq->sd, sd);
 }
 
 /* cpus with isolated domains */
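
[Editor's note] What carries the correctness here is rcu_assign_pointer(): it
issues the write barrier guaranteeing that any CPU which loads the new rq->sd
also sees the fully initialized domain tree behind it. That is why the runqueue
lock, the REQ_SET_DOMAIN handshake, and the cross-CPU
wake_up_process()/wait_for_completion() dance can all go.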
@@ -5112,6 +5081,7 @@ static int update_sched_domains(struct notifier_block *nfb,
 	case CPU_DOWN_PREPARE:
 		for_each_online_cpu(i)
 			cpu_attach_domain(NULL, i);
+		synchronize_kernel();
 		arch_destroy_sched_domains();
 		return NOTIFY_OK;
 
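
[Editor's note] The ordering in this last hunk is the other half of the
protocol: cpu_attach_domain(NULL, i) unpublishes every runqueue's domain
pointer, synchronize_kernel() then waits until every CPU has passed through a
quiescent state (which a preempt-disabled for_each_domain() walk cannot span),
and only then does arch_destroy_sched_domains() free the old trees.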