-rw-r--r--  arch/Kconfig                                                                      | 15
-rw-r--r--  arch/x86/Kconfig                                                                  |  2
-rw-r--r--  arch/x86/include/asm/context_tracking.h (renamed from arch/x86/include/asm/rcu.h) | 15
-rw-r--r--  arch/x86/kernel/entry_64.S                                                        |  2
-rw-r--r--  arch/x86/kernel/ptrace.c                                                          |  8
-rw-r--r--  arch/x86/kernel/signal.c                                                          |  5
-rw-r--r--  arch/x86/kernel/traps.c                                                           |  2
-rw-r--r--  arch/x86/mm/fault.c                                                               |  2
-rw-r--r--  include/linux/context_tracking.h                                                  | 18
-rw-r--r--  include/linux/rcupdate.h                                                          |  2
-rw-r--r--  init/Kconfig                                                                      | 28
-rw-r--r--  kernel/Makefile                                                                   |  1
-rw-r--r--  kernel/context_tracking.c                                                         | 83
-rw-r--r--  kernel/rcutree.c                                                                  | 64
-rw-r--r--  kernel/sched/core.c                                                               | 11
15 files changed, 150 insertions(+), 108 deletions(-)
diff --git a/arch/Kconfig b/arch/Kconfig
index 366ec06a5185..cc74aaea116c 100644
--- a/arch/Kconfig
+++ b/arch/Kconfig
@@ -300,15 +300,16 @@ config SECCOMP_FILTER
 
 	  See Documentation/prctl/seccomp_filter.txt for details.
 
-config HAVE_RCU_USER_QS
+config HAVE_CONTEXT_TRACKING
 	bool
 	help
-	  Provide kernel entry/exit hooks necessary for userspace
-	  RCU extended quiescent state. Syscalls need to be wrapped inside
-	  rcu_user_exit()-rcu_user_enter() through the slow path using
-	  TIF_NOHZ flag. Exceptions handlers must be wrapped as well. Irqs
-	  are already protected inside rcu_irq_enter/rcu_irq_exit() but
-	  preemption or signal handling on irq exit still need to be protected.
+	  Provide kernel/user boundaries probes necessary for subsystems
+	  that need it, such as userspace RCU extended quiescent state.
+	  Syscalls need to be wrapped inside user_exit()-user_enter() through
+	  the slow path using TIF_NOHZ flag. Exceptions handlers must be
+	  wrapped as well. Irqs are already protected inside
+	  rcu_irq_enter/rcu_irq_exit() but preemption or signal handling on
+	  irq exit still need to be protected.
 
 config HAVE_VIRT_CPU_ACCOUNTING
 	bool
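The help text above states the arch contract: route syscalls through the TIF_NOHZ slow path and bracket them with user_exit()/user_enter(), and wrap exception handlers the same way (the x86 ptrace.c hunks below do exactly that). A minimal sketch of the syscall side, using hypothetical hook names that are not part of this patch:

/* Sketch only: arch_syscall_slowpath_enter/leave are hypothetical names
 * mirroring the x86 syscall_trace_enter()/syscall_trace_leave() changes below. */
#include <linux/context_tracking.h>
#include <asm/ptrace.h>

void arch_syscall_slowpath_enter(struct pt_regs *regs)
{
	user_exit();		/* leaving userspace: this CPU is no longer in extended QS */
	/* ... tracing, seccomp, audit on syscall entry ... */
}

void arch_syscall_slowpath_leave(struct pt_regs *regs)
{
	/* ... tracing, audit on syscall exit ... */
	user_enter();		/* returning to userspace: re-enter the tracked user state */
}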
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 46c3bff3ced2..110cfad24f26 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -106,7 +106,7 @@ config X86
 	select KTIME_SCALAR if X86_32
 	select GENERIC_STRNCPY_FROM_USER
 	select GENERIC_STRNLEN_USER
-	select HAVE_RCU_USER_QS if X86_64
+	select HAVE_CONTEXT_TRACKING if X86_64
 	select HAVE_IRQ_TIME_ACCOUNTING
 	select GENERIC_KERNEL_THREAD
 	select GENERIC_KERNEL_EXECVE
diff --git a/arch/x86/include/asm/rcu.h b/arch/x86/include/asm/context_tracking.h
index d1ac07a23979..1616562683e9 100644
--- a/arch/x86/include/asm/rcu.h
+++ b/arch/x86/include/asm/context_tracking.h
@@ -1,27 +1,26 @@
-#ifndef _ASM_X86_RCU_H
-#define _ASM_X86_RCU_H
+#ifndef _ASM_X86_CONTEXT_TRACKING_H
+#define _ASM_X86_CONTEXT_TRACKING_H
 
 #ifndef __ASSEMBLY__
-
-#include <linux/rcupdate.h>
+#include <linux/context_tracking.h>
 #include <asm/ptrace.h>
 
 static inline void exception_enter(struct pt_regs *regs)
 {
-	rcu_user_exit();
+	user_exit();
 }
 
 static inline void exception_exit(struct pt_regs *regs)
 {
-#ifdef CONFIG_RCU_USER_QS
+#ifdef CONFIG_CONTEXT_TRACKING
 	if (user_mode(regs))
-		rcu_user_enter();
+		user_enter();
 #endif
 }
 
 #else /* __ASSEMBLY__ */
 
-#ifdef CONFIG_RCU_USER_QS
+#ifdef CONFIG_CONTEXT_TRACKING
 # define SCHEDULE_USER call schedule_user
 #else
 # define SCHEDULE_USER call schedule
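exception_enter() runs unconditionally because user_exit() is already a no-op when the CPU is tracked as being in the kernel, while exception_exit() only re-enters the user-tracked state when the trapped registers show the fault came from user mode. A sketch of the intended usage, with hypothetical handler names (the x86 traps.c and mm/fault.c files below keep including this header for exactly this pattern):

/* Sketch only: do_example_trap() and handle_example_trap() are hypothetical. */
#include <asm/context_tracking.h>
#include <asm/ptrace.h>

extern void handle_example_trap(struct pt_regs *regs, long error_code);

void do_example_trap(struct pt_regs *regs, long error_code)
{
	exception_enter(regs);			/* user_exit(); harmless if we were already in the kernel */
	handle_example_trap(regs, error_code);	/* the handler body may now use RCU normally */
	exception_exit(regs);			/* user_enter() again, but only if regs came from user mode */
}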
diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
index 0c58952d64e8..98faeb30139d 100644
--- a/arch/x86/kernel/entry_64.S
+++ b/arch/x86/kernel/entry_64.S
@@ -56,7 +56,7 @@
 #include <asm/ftrace.h>
 #include <asm/percpu.h>
 #include <asm/asm.h>
-#include <asm/rcu.h>
+#include <asm/context_tracking.h>
 #include <asm/smap.h>
 #include <linux/err.h>
 
diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c
index eff5b8c68652..65b88a5dc1a8 100644
--- a/arch/x86/kernel/ptrace.c
+++ b/arch/x86/kernel/ptrace.c
@@ -21,7 +21,7 @@
 #include <linux/signal.h>
 #include <linux/perf_event.h>
 #include <linux/hw_breakpoint.h>
-#include <linux/rcupdate.h>
+#include <linux/context_tracking.h>
 
 #include <asm/uaccess.h>
 #include <asm/pgtable.h>
@@ -1461,7 +1461,7 @@ long syscall_trace_enter(struct pt_regs *regs)
 {
 	long ret = 0;
 
-	rcu_user_exit();
+	user_exit();
 
 	/*
 	 * If we stepped into a sysenter/syscall insn, it trapped in
@@ -1516,7 +1516,7 @@ void syscall_trace_leave(struct pt_regs *regs)
 	 * or do_notify_resume(), in which case we can be in RCU
 	 * user mode.
 	 */
-	rcu_user_exit();
+	user_exit();
 
 	audit_syscall_exit(regs);
 
@@ -1534,5 +1534,5 @@ void syscall_trace_leave(struct pt_regs *regs)
 	if (step || test_thread_flag(TIF_SYSCALL_TRACE))
 		tracehook_report_syscall_exit(regs, step);
 
-	rcu_user_enter();
+	user_enter();
 }
diff --git a/arch/x86/kernel/signal.c b/arch/x86/kernel/signal.c
index 29ad351804e9..20ecac112e74 100644
--- a/arch/x86/kernel/signal.c
+++ b/arch/x86/kernel/signal.c
@@ -22,6 +22,7 @@
 #include <linux/uaccess.h>
 #include <linux/user-return-notifier.h>
 #include <linux/uprobes.h>
+#include <linux/context_tracking.h>
 
 #include <asm/processor.h>
 #include <asm/ucontext.h>
@@ -816,7 +817,7 @@ static void do_signal(struct pt_regs *regs)
 void
 do_notify_resume(struct pt_regs *regs, void *unused, __u32 thread_info_flags)
 {
-	rcu_user_exit();
+	user_exit();
 
 #ifdef CONFIG_X86_MCE
 	/* notify userspace of pending MCEs */
@@ -840,7 +841,7 @@ do_notify_resume(struct pt_regs *regs, void *unused, __u32 thread_info_flags)
 	if (thread_info_flags & _TIF_USER_RETURN_NOTIFY)
 		fire_user_return_notifiers();
 
-	rcu_user_enter();
+	user_enter();
 }
 
 void signal_fault(struct pt_regs *regs, void __user *frame, char *where)
diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
index 8276dc6794cc..eb8586693e0b 100644
--- a/arch/x86/kernel/traps.c
+++ b/arch/x86/kernel/traps.c
@@ -55,7 +55,7 @@
 #include <asm/i387.h>
 #include <asm/fpu-internal.h>
 #include <asm/mce.h>
-#include <asm/rcu.h>
+#include <asm/context_tracking.h>
 
 #include <asm/mach_traps.h>
 
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
index 8e13ecb41bee..7a529cbab7ad 100644
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -18,7 +18,7 @@
 #include <asm/pgalloc.h>		/* pgd_*(), ... */
 #include <asm/kmemcheck.h>		/* kmemcheck_*(), ... */
 #include <asm/fixmap.h>			/* VSYSCALL_START */
-#include <asm/rcu.h>			/* exception_enter(), ... */
+#include <asm/context_tracking.h>	/* exception_enter(), ... */
 
 /*
  * Page fault error code bits:
diff --git a/include/linux/context_tracking.h b/include/linux/context_tracking.h
new file mode 100644
index 000000000000..e24339ccb7f0
--- /dev/null
+++ b/include/linux/context_tracking.h
@@ -0,0 +1,18 @@
+#ifndef _LINUX_CONTEXT_TRACKING_H
+#define _LINUX_CONTEXT_TRACKING_H
+
+#ifdef CONFIG_CONTEXT_TRACKING
+#include <linux/sched.h>
+
+extern void user_enter(void);
+extern void user_exit(void);
+extern void context_tracking_task_switch(struct task_struct *prev,
+					 struct task_struct *next);
+#else
+static inline void user_enter(void) { }
+static inline void user_exit(void) { }
+static inline void context_tracking_task_switch(struct task_struct *prev,
+						struct task_struct *next) { }
+#endif /* !CONFIG_CONTEXT_TRACKING */
+
+#endif
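Because the !CONFIG_CONTEXT_TRACKING branch provides empty inline stubs, callers never need their own #ifdefs: with the option disabled the probes compile away entirely. A sketch of an unconditional caller, with a hypothetical function name (the real callers are the x86 return-to-user and scheduler paths patched below):

/* Sketch only: resume_userspace_example() is hypothetical, not part of this patch. */
#include <linux/context_tracking.h>

void resume_userspace_example(void)
{
	user_exit();	/* we entered the kernel on behalf of the task */
	/* ... deliver signals, run return-to-user notifiers ... */
	user_enter();	/* about to return to userspace */
}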
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index 8fe7c1840d30..275aa3f1062d 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -222,8 +222,6 @@ extern void rcu_user_enter(void);
 extern void rcu_user_exit(void);
 extern void rcu_user_enter_after_irq(void);
 extern void rcu_user_exit_after_irq(void);
-extern void rcu_user_hooks_switch(struct task_struct *prev,
-				  struct task_struct *next);
 #else
 static inline void rcu_user_enter(void) { }
 static inline void rcu_user_exit(void) { }
diff --git a/init/Kconfig b/init/Kconfig
index 5ac6ee094225..2054e048bb98 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -486,9 +486,13 @@ config PREEMPT_RCU
 	  This option enables preemptible-RCU code that is common between
 	  the TREE_PREEMPT_RCU and TINY_PREEMPT_RCU implementations.
 
+config CONTEXT_TRACKING
+	bool
+
 config RCU_USER_QS
 	bool "Consider userspace as in RCU extended quiescent state"
-	depends on HAVE_RCU_USER_QS && SMP
+	depends on HAVE_CONTEXT_TRACKING && SMP
+	select CONTEXT_TRACKING
 	help
 	  This option sets hooks on kernel / userspace boundaries and
 	  puts RCU in extended quiescent state when the CPU runs in
@@ -497,24 +501,20 @@ config RCU_USER_QS
 	  try to keep the timer tick on for RCU.
 
 	  Unless you want to hack and help the development of the full
-	  tickless feature, you shouldn't enable this option. It also
+	  dynticks mode, you shouldn't enable this option. It also
 	  adds unnecessary overhead.
 
 	  If unsure say N
 
-config RCU_USER_QS_FORCE
-	bool "Force userspace extended QS by default"
-	depends on RCU_USER_QS
+config CONTEXT_TRACKING_FORCE
+	bool "Force context tracking"
+	depends on CONTEXT_TRACKING
 	help
-	  Set the hooks in user/kernel boundaries by default in order to
-	  test this feature that treats userspace as an extended quiescent
-	  state until we have a real user like a full adaptive nohz option.
-
-	  Unless you want to hack and help the development of the full
-	  tickless feature, you shouldn't enable this option. It adds
-	  unnecessary overhead.
-
-	  If unsure say N
+	  Probe on user/kernel boundaries by default in order to
+	  test the features that rely on it such as userspace RCU extended
+	  quiescent states.
+	  This test is there for debugging until we have a real user like the
+	  full dynticks mode.
 
 config RCU_FANOUT
 	int "Tree-based hierarchical RCU fanout value"
diff --git a/kernel/Makefile b/kernel/Makefile
index 0dfeca4324ee..f90bbfc9727f 100644
--- a/kernel/Makefile
+++ b/kernel/Makefile
@@ -110,6 +110,7 @@ obj-$(CONFIG_USER_RETURN_NOTIFIER) += user-return-notifier.o
 obj-$(CONFIG_PADATA) += padata.o
 obj-$(CONFIG_CRASH_DUMP) += crash_dump.o
 obj-$(CONFIG_JUMP_LABEL) += jump_label.o
+obj-$(CONFIG_CONTEXT_TRACKING) += context_tracking.o
 
 $(obj)/configs.o: $(obj)/config_data.h
 
diff --git a/kernel/context_tracking.c b/kernel/context_tracking.c
new file mode 100644
index 000000000000..e0e07fd55508
--- /dev/null
+++ b/kernel/context_tracking.c
@@ -0,0 +1,83 @@
+#include <linux/context_tracking.h>
+#include <linux/rcupdate.h>
+#include <linux/sched.h>
+#include <linux/percpu.h>
+#include <linux/hardirq.h>
+
+struct context_tracking {
+	/*
+	 * When active is false, hooks are not set to
+	 * minimize overhead: TIF flags are cleared
+	 * and calls to user_enter/exit are ignored. This
+	 * may be further optimized using static keys.
+	 */
+	bool active;
+	enum {
+		IN_KERNEL = 0,
+		IN_USER,
+	} state;
+};
+
+static DEFINE_PER_CPU(struct context_tracking, context_tracking) = {
+#ifdef CONFIG_CONTEXT_TRACKING_FORCE
+	.active = true,
+#endif
+};
+
+void user_enter(void)
+{
+	unsigned long flags;
+
+	/*
+	 * Some contexts may involve an exception occuring in an irq,
+	 * leading to that nesting:
+	 * rcu_irq_enter() rcu_user_exit() rcu_user_exit() rcu_irq_exit()
+	 * This would mess up the dyntick_nesting count though. And rcu_irq_*()
+	 * helpers are enough to protect RCU uses inside the exception. So
+	 * just return immediately if we detect we are in an IRQ.
+	 */
+	if (in_interrupt())
+		return;
+
+	WARN_ON_ONCE(!current->mm);
+
+	local_irq_save(flags);
+	if (__this_cpu_read(context_tracking.active) &&
+	    __this_cpu_read(context_tracking.state) != IN_USER) {
+		__this_cpu_write(context_tracking.state, IN_USER);
+		rcu_user_enter();
+	}
+	local_irq_restore(flags);
+}
+
+void user_exit(void)
+{
+	unsigned long flags;
+
+	/*
+	 * Some contexts may involve an exception occuring in an irq,
+	 * leading to that nesting:
+	 * rcu_irq_enter() rcu_user_exit() rcu_user_exit() rcu_irq_exit()
+	 * This would mess up the dyntick_nesting count though. And rcu_irq_*()
+	 * helpers are enough to protect RCU uses inside the exception. So
+	 * just return immediately if we detect we are in an IRQ.
+	 */
+	if (in_interrupt())
+		return;
+
+	local_irq_save(flags);
+	if (__this_cpu_read(context_tracking.state) == IN_USER) {
+		__this_cpu_write(context_tracking.state, IN_KERNEL);
+		rcu_user_exit();
+	}
+	local_irq_restore(flags);
+}
+
+void context_tracking_task_switch(struct task_struct *prev,
+			     struct task_struct *next)
+{
+	if (__this_cpu_read(context_tracking.active)) {
+		clear_tsk_thread_flag(prev, TIF_NOHZ);
+		set_tsk_thread_flag(next, TIF_NOHZ);
+	}
+}
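The per-CPU state machine above only toggles between IN_KERNEL and IN_USER, so a repeated user_exit() (for example from an exception taken during a syscall) is a harmless no-op, and the in_interrupt() check keeps IRQ-nested exceptions from touching the RCU accounting at all. A self-contained userspace model of those transitions, for illustration only (not kernel code):

/* Sketch only: a toy model of the state machine, buildable with any C compiler. */
#include <stdbool.h>
#include <stdio.h>

enum ct_state { IN_KERNEL = 0, IN_USER };

static struct {
	bool active;
	enum ct_state state;
} ct = { .active = true, .state = IN_KERNEL };

static void model_user_enter(void)
{
	if (ct.active && ct.state != IN_USER) {
		ct.state = IN_USER;
		puts("-> rcu_user_enter()");	/* enter RCU extended quiescent state */
	}
}

static void model_user_exit(void)
{
	if (ct.state == IN_USER) {
		ct.state = IN_KERNEL;
		puts("-> rcu_user_exit()");	/* leave RCU extended quiescent state */
	}
}

int main(void)
{
	model_user_enter();	/* task returns to userspace */
	model_user_exit();	/* syscall entry */
	model_user_exit();	/* exception during the syscall: no effect */
	model_user_enter();	/* return to userspace */
	return 0;
}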
diff --git a/kernel/rcutree.c b/kernel/rcutree.c
index 7733eb56e156..e441b77b614e 100644
--- a/kernel/rcutree.c
+++ b/kernel/rcutree.c
@@ -207,9 +207,6 @@ EXPORT_SYMBOL_GPL(rcu_note_context_switch);
 DEFINE_PER_CPU(struct rcu_dynticks, rcu_dynticks) = {
 	.dynticks_nesting = DYNTICK_TASK_EXIT_IDLE,
 	.dynticks = ATOMIC_INIT(1),
-#if defined(CONFIG_RCU_USER_QS) && !defined(CONFIG_RCU_USER_QS_FORCE)
-	.ignore_user_qs = true,
-#endif
 };
 
 static long blimit = 10;	/* Maximum callbacks per rcu_do_batch. */
@@ -420,29 +417,7 @@ EXPORT_SYMBOL_GPL(rcu_idle_enter);
  */
 void rcu_user_enter(void)
 {
-	unsigned long flags;
-	struct rcu_dynticks *rdtp;
-
-	/*
-	 * Some contexts may involve an exception occuring in an irq,
-	 * leading to that nesting:
-	 * rcu_irq_enter() rcu_user_exit() rcu_user_exit() rcu_irq_exit()
-	 * This would mess up the dyntick_nesting count though. And rcu_irq_*()
-	 * helpers are enough to protect RCU uses inside the exception. So
-	 * just return immediately if we detect we are in an IRQ.
-	 */
-	if (in_interrupt())
-		return;
-
-	WARN_ON_ONCE(!current->mm);
-
-	local_irq_save(flags);
-	rdtp = &__get_cpu_var(rcu_dynticks);
-	if (!rdtp->ignore_user_qs && !rdtp->in_user) {
-		rdtp->in_user = true;
-		rcu_eqs_enter(true);
-	}
-	local_irq_restore(flags);
+	rcu_eqs_enter(1);
 }
 
 /**
@@ -579,27 +554,7 @@ EXPORT_SYMBOL_GPL(rcu_idle_exit);
  */
 void rcu_user_exit(void)
 {
-	unsigned long flags;
-	struct rcu_dynticks *rdtp;
-
-	/*
-	 * Some contexts may involve an exception occuring in an irq,
-	 * leading to that nesting:
-	 * rcu_irq_enter() rcu_user_exit() rcu_user_exit() rcu_irq_exit()
-	 * This would mess up the dyntick_nesting count though. And rcu_irq_*()
-	 * helpers are enough to protect RCU uses inside the exception. So
-	 * just return immediately if we detect we are in an IRQ.
-	 */
-	if (in_interrupt())
-		return;
-
-	local_irq_save(flags);
-	rdtp = &__get_cpu_var(rcu_dynticks);
-	if (rdtp->in_user) {
-		rdtp->in_user = false;
-		rcu_eqs_exit(true);
-	}
-	local_irq_restore(flags);
+	rcu_eqs_exit(1);
 }
 
 /**
@@ -722,21 +677,6 @@ int rcu_is_cpu_idle(void)
 }
 EXPORT_SYMBOL(rcu_is_cpu_idle);
 
-#ifdef CONFIG_RCU_USER_QS
-void rcu_user_hooks_switch(struct task_struct *prev,
-			   struct task_struct *next)
-{
-	struct rcu_dynticks *rdtp;
-
-	/* Interrupts are disabled in context switch */
-	rdtp = &__get_cpu_var(rcu_dynticks);
-	if (!rdtp->ignore_user_qs) {
-		clear_tsk_thread_flag(prev, TIF_NOHZ);
-		set_tsk_thread_flag(next, TIF_NOHZ);
-	}
-}
-#endif /* #ifdef CONFIG_RCU_USER_QS */
-
 #if defined(CONFIG_PROVE_RCU) && defined(CONFIG_HOTPLUG_CPU)
 
 /*
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 36f260864f65..80f80dfca70e 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -72,6 +72,7 @@
 #include <linux/slab.h>
 #include <linux/init_task.h>
 #include <linux/binfmts.h>
+#include <linux/context_tracking.h>
 
 #include <asm/switch_to.h>
 #include <asm/tlb.h>
@@ -1886,8 +1887,8 @@ context_switch(struct rq *rq, struct task_struct *prev,
 	spin_release(&rq->lock.dep_map, 1, _THIS_IP_);
 #endif
 
+	context_tracking_task_switch(prev, next);
 	/* Here we just switch the register state and the stack. */
-	rcu_user_hooks_switch(prev, next);
 	switch_to(prev, next, prev);
 
 	barrier();
@@ -2911,7 +2912,7 @@ asmlinkage void __sched schedule(void)
 }
 EXPORT_SYMBOL(schedule);
 
-#ifdef CONFIG_RCU_USER_QS
+#ifdef CONFIG_CONTEXT_TRACKING
 asmlinkage void __sched schedule_user(void)
 {
 	/*
@@ -2920,9 +2921,9 @@ asmlinkage void __sched schedule_user(void)
 	 * we haven't yet exited the RCU idle mode. Do it here manually until
 	 * we find a better solution.
 	 */
-	rcu_user_exit();
+	user_exit();
 	schedule();
-	rcu_user_enter();
+	user_enter();
 }
 #endif
 
@@ -3027,7 +3028,7 @@ asmlinkage void __sched preempt_schedule_irq(void)
 	/* Catch callers which need to be fixed */
 	BUG_ON(ti->preempt_count || !irqs_disabled());
 
-	rcu_user_exit();
+	user_exit();
 	do {
 		add_preempt_count(PREEMPT_ACTIVE);
 		local_irq_enable();