author		Peter Zijlstra <a.p.zijlstra@chello.nl>	2012-09-12 05:22:00 -0400
committer	Ingo Molnar <mingo@kernel.org>	2012-09-13 10:52:04 -0400
commit		f3e947867478af9a12b9956bcd000ac7613a8a95 (patch)
tree		63fabb89439447e0f72c465e8b8d0852e9deff08
parent		5ed4f1d96deee82ee92cd1ac1e0108c27e80e9b0 (diff)
sched: Remove __ARCH_WANT_INTERRUPTS_ON_CTXSW
Now that the last architecture to use this has stopped doing so (ARM,
thanks Catalin!), we can remove this complexity from the scheduler
core.
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Oleg Nesterov <oleg@redhat.com>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Link: http://lkml.kernel.org/n/tip-g9p2a1w81xxbrze25v9zpzbf@git.kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
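For context: __ARCH_WANT_INTERRUPTS_ON_CTXSW let an architecture run
switch_to() with interrupts enabled, which in turn required the rq lock
to be dropped around the switch. A condensed sketch of the pre-patch
behaviour, distilled from the kernel/sched/sched.h hunk below
(simplified; the memory barriers and the !CONFIG_SMP variant are
omitted):

  /*
   * Simplified sketch of the pre-patch prepare_lock_switch() from
   * kernel/sched/sched.h; barriers and the !CONFIG_SMP variant
   * omitted for brevity.
   */
  static inline void prepare_lock_switch(struct rq *rq, struct task_struct *next)
  {
  	next->on_cpu = 1;			/* mark next as in flight */
  #ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
  	raw_spin_unlock_irq(&rq->lock);		/* IRQs on over switch_to() */
  #else
  	raw_spin_unlock(&rq->lock);		/* IRQs off until finish_lock_switch() */
  #endif
  }

After this patch, only the raw_spin_unlock() path remains.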
 Documentation/scheduler/sched-arch.txt | 10
 include/linux/sched.h                  |  5
 kernel/fork.c                          |  4
 kernel/sched/core.c                    | 40
 kernel/sched/rt.c                      |  5
 kernel/sched/sched.h                   |  6
 6 files changed, 1 insertion(+), 69 deletions(-)
diff --git a/Documentation/scheduler/sched-arch.txt b/Documentation/scheduler/sched-arch.txt
index 28aa1075e291..b1b8587b86f0 100644
--- a/Documentation/scheduler/sched-arch.txt
+++ b/Documentation/scheduler/sched-arch.txt
@@ -17,16 +17,6 @@ you must `#define __ARCH_WANT_UNLOCKED_CTXSW` in a header file
 Unlocked context switches introduce only a very minor performance
 penalty to the core scheduler implementation in the CONFIG_SMP case.
 
-2. Interrupt status
-By default, the switch_to arch function is called with interrupts
-disabled. Interrupts may be enabled over the call if it is likely to
-introduce a significant interrupt latency by adding the line
-`#define __ARCH_WANT_INTERRUPTS_ON_CTXSW` in the same place as for
-unlocked context switches. This define also implies
-`__ARCH_WANT_UNLOCKED_CTXSW`. See arch/arm/include/asm/system.h for an
-example.
-
-
 CPU idle
 ========
 Your cpu_idle routines need to obey the following rules:
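The deleted section above documented the opt-in. Concretely (arch/foo
is hypothetical, used only for illustration; pre-patch, ARM's
arch/arm/include/asm/system.h was the in-tree example), an architecture
enabled interrupts-on context switches with a single define in an arch
header, which include/linux/sched.h expanded to imply unlocked context
switches as well:

  /* Hypothetical arch/foo/include/asm/system.h (pre-patch) */
  #define __ARCH_WANT_INTERRUPTS_ON_CTXSW

  /* include/linux/sched.h then implied (block removed by this patch): */
  #ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
  # define __ARCH_WANT_UNLOCKED_CTXSW
  #endif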
diff --git a/include/linux/sched.h b/include/linux/sched.h
index f3eebc121ebc..60e5e38eee2a 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -678,11 +678,6 @@ struct signal_struct {
 					 * (notably. ptrace) */
 };
 
-/* Context switch must be unlocked if interrupts are to be enabled */
-#ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
-# define __ARCH_WANT_UNLOCKED_CTXSW
-#endif
-
 /*
  * Bits in flags field of signal_struct.
  */
diff --git a/kernel/fork.c b/kernel/fork.c
index 2c8857e12855..743d48f4d711 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -1280,11 +1280,7 @@ static struct task_struct *copy_process(unsigned long clone_flags,
 #endif
 #ifdef CONFIG_TRACE_IRQFLAGS
 	p->irq_events = 0;
-#ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
-	p->hardirqs_enabled = 1;
-#else
 	p->hardirqs_enabled = 0;
-#endif
 	p->hardirq_enable_ip = 0;
 	p->hardirq_enable_event = 0;
 	p->hardirq_disable_ip = _THIS_IP_;
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index c46a011ce5db..8b51b2d9b1fd 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -1361,25 +1361,6 @@ static void ttwu_queue_remote(struct task_struct *p, int cpu)
 	smp_send_reschedule(cpu);
 }
 
-#ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
-static int ttwu_activate_remote(struct task_struct *p, int wake_flags)
-{
-	struct rq *rq;
-	int ret = 0;
-
-	rq = __task_rq_lock(p);
-	if (p->on_cpu) {
-		ttwu_activate(rq, p, ENQUEUE_WAKEUP);
-		ttwu_do_wakeup(rq, p, wake_flags);
-		ret = 1;
-	}
-	__task_rq_unlock(rq);
-
-	return ret;
-
-}
-#endif /* __ARCH_WANT_INTERRUPTS_ON_CTXSW */
-
 bool cpus_share_cache(int this_cpu, int that_cpu)
 {
 	return per_cpu(sd_llc_id, this_cpu) == per_cpu(sd_llc_id, that_cpu);
@@ -1440,21 +1421,8 @@ try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
 	 * If the owning (remote) cpu is still in the middle of schedule() with
 	 * this task as prev, wait until its done referencing the task.
 	 */
-	while (p->on_cpu) {
-#ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
-	/*
-	 * In case the architecture enables interrupts in
-	 * context_switch(), we cannot busy wait, since that
-	 * would lead to deadlocks when an interrupt hits and
-	 * tries to wake up @prev. So bail and do a complete
-	 * remote wakeup.
-	 */
-	if (ttwu_activate_remote(p, wake_flags))
-		goto stat;
-#else
+	while (p->on_cpu)
 		cpu_relax();
-#endif
-	}
 	/*
 	 * Pairs with the smp_wmb() in finish_lock_switch().
 	 */
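The comment deleted in this hunk carried the key reasoning: with
interrupts enabled inside context_switch(), an interrupt hitting the
switching CPU could itself try to wake @prev and then spin on
p->on_cpu forever, since the code that clears the flag had been
preempted by that very interrupt. With no architecture enabling
interrupts there any more, the plain busy-wait is bounded. A sketch of
the surviving wait path with that reasoning attached as comments (the
smp_rmb() is implied by the trailing context comment above, which pairs
it with the smp_wmb() in finish_lock_switch()):

  /*
   * The remote CPU now runs the whole switch with IRQs disabled, so it
   * cannot be interrupted mid-switch and recurse into a wakeup of
   * @prev; p->on_cpu is therefore guaranteed to clear promptly.
   */
  while (p->on_cpu)
  	cpu_relax();
  /*
   * Pairs with the smp_wmb() in finish_lock_switch().
   */
  smp_rmb();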
@@ -1798,13 +1766,7 @@ static void finish_task_switch(struct rq *rq, struct task_struct *prev)
 	prev_state = prev->state;
 	account_switch_vtime(prev);
 	finish_arch_switch(prev);
-#ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
-	local_irq_disable();
-#endif /* __ARCH_WANT_INTERRUPTS_ON_CTXSW */
 	perf_event_task_sched_in(prev, current);
-#ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
-	local_irq_enable();
-#endif /* __ARCH_WANT_INTERRUPTS_ON_CTXSW */
 	finish_lock_switch(rq, prev);
 	finish_arch_post_lock_switch();
 
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index e0b7ba9c040f..418feb01344e 100644
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -1632,11 +1632,6 @@ static int push_rt_task(struct rq *rq)
 	if (!next_task)
 		return 0;
 
-#ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
-	if (unlikely(task_running(rq, next_task)))
-		return 0;
-#endif
-
 retry:
 	if (unlikely(next_task == rq->curr)) {
 		WARN_ON(1);
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 09871698e80c..7a7db09cfabc 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -737,11 +737,7 @@ static inline void prepare_lock_switch(struct rq *rq, struct task_struct *next)
 	 */
 	next->on_cpu = 1;
 #endif
-#ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
-	raw_spin_unlock_irq(&rq->lock);
-#else
 	raw_spin_unlock(&rq->lock);
-#endif
 }
 
 static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev)
@@ -755,9 +751,7 @@ static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev)
 	smp_wmb();
 	prev->on_cpu = 0;
 #endif
-#ifndef __ARCH_WANT_INTERRUPTS_ON_CTXSW
 	local_irq_enable();
-#endif
 }
 #endif /* __ARCH_WANT_UNLOCKED_CTXSW */
 