diff options
author | Paul E. McKenney <paulmck@linux.vnet.ibm.com> | 2015-10-31 03:59:01 -0400 |
---|---|---|
committer | Paul E. McKenney <paulmck@linux.vnet.ibm.com> | 2015-12-07 20:01:31 -0500 |
commit | 7c9906ca5e582a773fff696975e312cef58a7386 (patch) | |
tree | 1155e2ed10709b9d3efa87b2f4b826cc886e84eb | |
parent | d117c8aa1d511f76401337620b9c4ffb4c886579 (diff) |
rcu: Don't redundantly disable irqs in rcu_irq_{enter,exit}()
This commit replaces a local_irq_save()/local_irq_restore() pair with
a lockdep assertion that interrupts are already disabled. This should
remove the corresponding overhead from the interrupt entry/exit fastpaths.
This change was inspired by the fact that Iftekhar Ahmed's mutation
testing showed that removing rcu_irq_enter()'s call to local_irq_restore()
had no effect, which might indicate that interrupts were always enabled
anyway.
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
-rw-r--r-- | include/linux/rcupdate.h | 4 | ||||
-rw-r--r-- | include/linux/rcutiny.h | 8 | ||||
-rw-r--r-- | include/linux/rcutree.h | 2 | ||||
-rw-r--r-- | include/linux/tracepoint.h | 4 | ||||
-rw-r--r-- | kernel/rcu/tree.c | 32 |
5 files changed, 40 insertions, 10 deletions
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h index a0189ba67fde..f2b667df1131 100644 --- a/include/linux/rcupdate.h +++ b/include/linux/rcupdate.h | |||
@@ -379,9 +379,9 @@ static inline void rcu_init_nohz(void) | |||
379 | */ | 379 | */ |
380 | #define RCU_NONIDLE(a) \ | 380 | #define RCU_NONIDLE(a) \ |
381 | do { \ | 381 | do { \ |
382 | rcu_irq_enter(); \ | 382 | rcu_irq_enter_irqson(); \ |
383 | do { a; } while (0); \ | 383 | do { a; } while (0); \ |
384 | rcu_irq_exit(); \ | 384 | rcu_irq_exit_irqson(); \ |
385 | } while (0) | 385 | } while (0) |
386 | 386 | ||
387 | /* | 387 | /* |
diff --git a/include/linux/rcutiny.h b/include/linux/rcutiny.h index 4c1aaf9cce7b..64809aea661c 100644 --- a/include/linux/rcutiny.h +++ b/include/linux/rcutiny.h | |||
@@ -181,6 +181,14 @@ static inline void rcu_irq_enter(void) | |||
181 | { | 181 | { |
182 | } | 182 | } |
183 | 183 | ||
184 | static inline void rcu_irq_exit_irqson(void) | ||
185 | { | ||
186 | } | ||
187 | |||
188 | static inline void rcu_irq_enter_irqson(void) | ||
189 | { | ||
190 | } | ||
191 | |||
184 | static inline void rcu_irq_exit(void) | 192 | static inline void rcu_irq_exit(void) |
185 | { | 193 | { |
186 | } | 194 | } |
diff --git a/include/linux/rcutree.h b/include/linux/rcutree.h index 9d3eda39bcd2..ad1eda9fa4da 100644 --- a/include/linux/rcutree.h +++ b/include/linux/rcutree.h | |||
@@ -97,6 +97,8 @@ void rcu_idle_enter(void); | |||
97 | void rcu_idle_exit(void); | 97 | void rcu_idle_exit(void); |
98 | void rcu_irq_enter(void); | 98 | void rcu_irq_enter(void); |
99 | void rcu_irq_exit(void); | 99 | void rcu_irq_exit(void); |
100 | void rcu_irq_enter_irqson(void); | ||
101 | void rcu_irq_exit_irqson(void); | ||
100 | 102 | ||
101 | void exit_rcu(void); | 103 | void exit_rcu(void); |
102 | 104 | ||
diff --git a/include/linux/tracepoint.h b/include/linux/tracepoint.h index 696a339c592c..7834a8a8bf1e 100644 --- a/include/linux/tracepoint.h +++ b/include/linux/tracepoint.h | |||
@@ -171,8 +171,8 @@ extern void syscall_unregfunc(void); | |||
171 | TP_PROTO(data_proto), \ | 171 | TP_PROTO(data_proto), \ |
172 | TP_ARGS(data_args), \ | 172 | TP_ARGS(data_args), \ |
173 | TP_CONDITION(cond), \ | 173 | TP_CONDITION(cond), \ |
174 | rcu_irq_enter(), \ | 174 | rcu_irq_enter_irqson(), \ |
175 | rcu_irq_exit()); \ | 175 | rcu_irq_exit_irqson()); \ |
176 | } | 176 | } |
177 | #else | 177 | #else |
178 | #define __DECLARE_TRACE_RCU(name, proto, args, cond, data_proto, data_args) | 178 | #define __DECLARE_TRACE_RCU(name, proto, args, cond, data_proto, data_args) |
diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c index d6863bceeb45..40940b0d0310 100644 --- a/kernel/rcu/tree.c +++ b/kernel/rcu/tree.c | |||
@@ -732,7 +732,7 @@ void rcu_user_enter(void) | |||
732 | * | 732 | * |
733 | * Exit from an interrupt handler, which might possibly result in entering | 733 | * Exit from an interrupt handler, which might possibly result in entering |
734 | * idle mode, in other words, leaving the mode in which read-side critical | 734 | * idle mode, in other words, leaving the mode in which read-side critical |
735 | * sections can occur. | 735 | * sections can occur. The caller must have disabled interrupts. |
736 | * | 736 | * |
737 | * This code assumes that the idle loop never does anything that might | 737 | * This code assumes that the idle loop never does anything that might |
738 | * result in unbalanced calls to irq_enter() and irq_exit(). If your | 738 | * result in unbalanced calls to irq_enter() and irq_exit(). If your |
@@ -745,11 +745,10 @@ void rcu_user_enter(void) | |||
745 | */ | 745 | */ |
746 | void rcu_irq_exit(void) | 746 | void rcu_irq_exit(void) |
747 | { | 747 | { |
748 | unsigned long flags; | ||
749 | long long oldval; | 748 | long long oldval; |
750 | struct rcu_dynticks *rdtp; | 749 | struct rcu_dynticks *rdtp; |
751 | 750 | ||
752 | local_irq_save(flags); | 751 | RCU_LOCKDEP_WARN(!irqs_disabled(), "rcu_irq_exit() invoked with irqs enabled!!!"); |
753 | rdtp = this_cpu_ptr(&rcu_dynticks); | 752 | rdtp = this_cpu_ptr(&rcu_dynticks); |
754 | oldval = rdtp->dynticks_nesting; | 753 | oldval = rdtp->dynticks_nesting; |
755 | rdtp->dynticks_nesting--; | 754 | rdtp->dynticks_nesting--; |
@@ -760,6 +759,17 @@ void rcu_irq_exit(void) | |||
760 | else | 759 | else |
761 | rcu_eqs_enter_common(oldval, true); | 760 | rcu_eqs_enter_common(oldval, true); |
762 | rcu_sysidle_enter(1); | 761 | rcu_sysidle_enter(1); |
762 | } | ||
763 | |||
764 | /* | ||
765 | * Wrapper for rcu_irq_exit() where interrupts are enabled. | ||
766 | */ | ||
767 | void rcu_irq_exit_irqson(void) | ||
768 | { | ||
769 | unsigned long flags; | ||
770 | |||
771 | local_irq_save(flags); | ||
772 | rcu_irq_exit(); | ||
763 | local_irq_restore(flags); | 773 | local_irq_restore(flags); |
764 | } | 774 | } |
765 | 775 | ||
@@ -857,7 +867,7 @@ void rcu_user_exit(void) | |||
857 | * | 867 | * |
858 | * Enter an interrupt handler, which might possibly result in exiting | 868 | * Enter an interrupt handler, which might possibly result in exiting |
859 | * idle mode, in other words, entering the mode in which read-side critical | 869 | * idle mode, in other words, entering the mode in which read-side critical |
860 | * sections can occur. | 870 | * sections can occur. The caller must have disabled interrupts. |
861 | * | 871 | * |
862 | * Note that the Linux kernel is fully capable of entering an interrupt | 872 | * Note that the Linux kernel is fully capable of entering an interrupt |
863 | * handler that it never exits, for example when doing upcalls to | 873 | * handler that it never exits, for example when doing upcalls to |
@@ -873,11 +883,10 @@ void rcu_user_exit(void) | |||
873 | */ | 883 | */ |
874 | void rcu_irq_enter(void) | 884 | void rcu_irq_enter(void) |
875 | { | 885 | { |
876 | unsigned long flags; | ||
877 | struct rcu_dynticks *rdtp; | 886 | struct rcu_dynticks *rdtp; |
878 | long long oldval; | 887 | long long oldval; |
879 | 888 | ||
880 | local_irq_save(flags); | 889 | RCU_LOCKDEP_WARN(!irqs_disabled(), "rcu_irq_enter() invoked with irqs enabled!!!"); |
881 | rdtp = this_cpu_ptr(&rcu_dynticks); | 890 | rdtp = this_cpu_ptr(&rcu_dynticks); |
882 | oldval = rdtp->dynticks_nesting; | 891 | oldval = rdtp->dynticks_nesting; |
883 | rdtp->dynticks_nesting++; | 892 | rdtp->dynticks_nesting++; |
@@ -888,6 +897,17 @@ void rcu_irq_enter(void) | |||
888 | else | 897 | else |
889 | rcu_eqs_exit_common(oldval, true); | 898 | rcu_eqs_exit_common(oldval, true); |
890 | rcu_sysidle_exit(1); | 899 | rcu_sysidle_exit(1); |
900 | } | ||
901 | |||
902 | /* | ||
903 | * Wrapper for rcu_irq_enter() where interrupts are enabled. | ||
904 | */ | ||
905 | void rcu_irq_enter_irqson(void) | ||
906 | { | ||
907 | unsigned long flags; | ||
908 | |||
909 | local_irq_save(flags); | ||
910 | rcu_irq_enter(); | ||
891 | local_irq_restore(flags); | 911 | local_irq_restore(flags); |
892 | } | 912 | } |
893 | 913 | ||