author		Thomas Gleixner <tglx@linutronix.de>	2016-10-22 07:07:37 -0400
committer	Thomas Gleixner <tglx@linutronix.de>	2016-10-25 10:32:50 -0400
commit		6bad6bccf2d717f652d37e63cf261eaa23466009
tree		9a22c3c4a778297bf544a0cc89da0c76c8784d6b
parent		041ad7bc758db259bb960ef795197dd14aab19a6
timers: Prevent base clock corruption when forwarding
When a timer is enqueued we try to forward the timer base clock. This
mechanism has two issues:
1) Forwarding a remote base unlocked
The forwarding function is called from get_target_base() with the current
timer base lock held. But if the new target base is a different base than
the current base (can happen with NOHZ, sigh!) then the forwarding is done
on an unlocked base. This can lead to corruption of base->clk.
The solution is simple: invoke the forwarding after the target base is locked.
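
For illustration only (not part of the patch): a minimal sketch of the enqueue
path, reusing the patch's get_target_base()/forward_timer_base() helpers inside
an otherwise hypothetical, simplified function (IRQ handling and timer
migration details omitted), to show the corrected ordering where the target
base is locked before its clock is forwarded.

/* Illustrative sketch only -- simplified, not the actual __mod_timer() code. */
static void enqueue_path_sketch(struct timer_base *base, unsigned int tflags)
{
	struct timer_base *target;

	spin_lock(&base->lock);				/* current base is locked */

	target = get_target_base(base, tflags);		/* may be a different base (NOHZ) */
	if (target != base) {
		spin_unlock(&base->lock);
		spin_lock(&target->lock);		/* lock the target base first ... */
	}
	forward_timer_base(target);			/* ... then forward its clock */

	/* enqueue the timer on 'target' here, then drop the lock */
	spin_unlock(&target->lock);
}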
2) Possible corruption due to jiffies advancing
This is similar to the issue in get_next_timer_interrupt() which was fixed in
the previous patch. jiffies can advance between the check and the assignment,
thereby advancing base->clk beyond the next expiry value.
So we need to read jiffies into a local variable once and do the checks and
assignment with the local copy.
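
For reference, the resulting forward_timer_base() (as applied in the hunk
below) takes a single snapshot of jiffies and uses that local copy for both the
check and the assignment:

static inline void forward_timer_base(struct timer_base *base)
{
	unsigned long jnow = READ_ONCE(jiffies);

	/*
	 * We only forward the base when it's idle and we have a delta between
	 * base clock and jiffies.
	 */
	if (!base->is_idle || (long) (jnow - base->clk) < 2)
		return;

	/*
	 * If the next expiry value is > jiffies, then we fast forward to
	 * jiffies otherwise we forward to the next expiry value.
	 */
	if (time_after(base->next_expiry, jnow))
		base->clk = jnow;
	else
		base->clk = base->next_expiry;
}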
Fixes: a683f390b93f ("timers: Forward the wheel clock whenever possible")
Reported-by: Ashton Holmes <scoopta@gmail.com>
Reported-by: Michael Thayer <michael.thayer@oracle.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Cc: Michal Necasek <michal.necasek@oracle.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: knut.osmundsen@oracle.com
Cc: stable@vger.kernel.org
Cc: stern@rowland.harvard.edu
Cc: rt@linutronix.de
Link: http://lkml.kernel.org/r/20161022110552.253640125@linutronix.de
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
-rw-r--r--	kernel/time/timer.c	23
1 file changed, 10 insertions(+), 13 deletions(-)
diff --git a/kernel/time/timer.c b/kernel/time/timer.c
index 7c446fb5163a..c611c47de884 100644
--- a/kernel/time/timer.c
+++ b/kernel/time/timer.c
@@ -878,7 +878,7 @@ static inline struct timer_base *get_timer_base(u32 tflags)
 
 #ifdef CONFIG_NO_HZ_COMMON
 static inline struct timer_base *
-__get_target_base(struct timer_base *base, unsigned tflags)
+get_target_base(struct timer_base *base, unsigned tflags)
 {
 #ifdef CONFIG_SMP
 	if ((tflags & TIMER_PINNED) || !base->migration_enabled)
@@ -891,25 +891,27 @@ __get_target_base(struct timer_base *base, unsigned tflags)
 
 static inline void forward_timer_base(struct timer_base *base)
 {
+	unsigned long jnow = READ_ONCE(jiffies);
+
 	/*
 	 * We only forward the base when it's idle and we have a delta between
 	 * base clock and jiffies.
 	 */
-	if (!base->is_idle || (long) (jiffies - base->clk) < 2)
+	if (!base->is_idle || (long) (jnow - base->clk) < 2)
 		return;
 
 	/*
 	 * If the next expiry value is > jiffies, then we fast forward to
 	 * jiffies otherwise we forward to the next expiry value.
 	 */
-	if (time_after(base->next_expiry, jiffies))
-		base->clk = jiffies;
+	if (time_after(base->next_expiry, jnow))
+		base->clk = jnow;
 	else
 		base->clk = base->next_expiry;
 }
 #else
 static inline struct timer_base *
-__get_target_base(struct timer_base *base, unsigned tflags)
+get_target_base(struct timer_base *base, unsigned tflags)
 {
 	return get_timer_this_cpu_base(tflags);
 }
@@ -917,14 +919,6 @@ __get_target_base(struct timer_base *base, unsigned tflags)
 static inline void forward_timer_base(struct timer_base *base) { }
 #endif
 
-static inline struct timer_base *
-get_target_base(struct timer_base *base, unsigned tflags)
-{
-	struct timer_base *target = __get_target_base(base, tflags);
-
-	forward_timer_base(target);
-	return target;
-}
 
 /*
  * We are using hashed locking: Holding per_cpu(timer_bases[x]).lock means
@@ -1037,6 +1031,9 @@ __mod_timer(struct timer_list *timer, unsigned long expires, bool pending_only)
 		}
 	}
 
+	/* Try to forward a stale timer base clock */
+	forward_timer_base(base);
+
 	timer->expires = expires;
 	/*
 	 * If 'idx' was calculated above and the base time did not advance