about summary refs log tree commit diff stats
diff options
context:
space:
mode:
authorThomas Gleixner <tglx@linutronix.de>2012-02-28 14:46:04 -0500
committerJohn Stultz <john.stultz@linaro.org>2012-03-15 21:17:58 -0400
commit2ab516575f2f273b19d95140d02c54612201e80a (patch)
tree26db92249e65754d99759af21fd207507a81cec8
parent6c260d586343f7f78239d90aa9e2cfed02f74ff3 (diff)
x86: vdso: Use seqcount instead of seqlock
The update of the vdso data happens under xtime_lock, so adding a nested lock is pointless. Just use a seqcount to sync the readers. Reviewed-by: Andy Lutomirski <luto@amacapital.net> Signed-off-by: Thomas Gleixner <tglx@linutronix.de> Signed-off-by: John Stultz <john.stultz@linaro.org>
-rw-r--r--  arch/x86/include/asm/vgtod.h   |  2 +-
-rw-r--r--  arch/x86/kernel/vsyscall_64.c  | 11 +++-------
-rw-r--r--  arch/x86/vdso/vclock_gettime.c | 16 ++++++-------
3 files changed, 12 insertions(+), 17 deletions(-)
diff --git a/arch/x86/include/asm/vgtod.h b/arch/x86/include/asm/vgtod.h
index 815285bcaceb..1f007178c813 100644
--- a/arch/x86/include/asm/vgtod.h
+++ b/arch/x86/include/asm/vgtod.h
@@ -5,7 +5,7 @@
 #include <linux/clocksource.h>
 
 struct vsyscall_gtod_data {
-	seqlock_t	lock;
+	seqcount_t	seq;
 
 	/* open coded 'struct timespec' */
 	time_t		wall_time_sec;
diff --git a/arch/x86/kernel/vsyscall_64.c b/arch/x86/kernel/vsyscall_64.c
index 33385c18e5d3..cdc95a707cd1 100644
--- a/arch/x86/kernel/vsyscall_64.c
+++ b/arch/x86/kernel/vsyscall_64.c
@@ -52,10 +52,7 @@
 #include "vsyscall_trace.h"
 
 DEFINE_VVAR(int, vgetcpu_mode);
-DEFINE_VVAR(struct vsyscall_gtod_data, vsyscall_gtod_data) =
-{
-	.lock = __SEQLOCK_UNLOCKED(__vsyscall_gtod_data.lock),
-};
+DEFINE_VVAR(struct vsyscall_gtod_data, vsyscall_gtod_data);
 
 static enum { EMULATE, NATIVE, NONE } vsyscall_mode = EMULATE;
 
@@ -86,9 +83,7 @@ void update_vsyscall_tz(void)
 void update_vsyscall(struct timespec *wall_time, struct timespec *wtm,
 			struct clocksource *clock, u32 mult)
 {
-	unsigned long flags;
-
-	write_seqlock_irqsave(&vsyscall_gtod_data.lock, flags);
+	write_seqcount_begin(&vsyscall_gtod_data.seq);
 
 	/* copy vsyscall data */
 	vsyscall_gtod_data.clock.vclock_mode	= clock->archdata.vclock_mode;
@@ -101,7 +96,7 @@ void update_vsyscall(struct timespec *wall_time, struct timespec *wtm,
 	vsyscall_gtod_data.wall_to_monotonic	= *wtm;
 	vsyscall_gtod_data.wall_time_coarse	= __current_kernel_time();
 
-	write_sequnlock_irqrestore(&vsyscall_gtod_data.lock, flags);
+	write_seqcount_end(&vsyscall_gtod_data.seq);
 }
 
 static void warn_bad_vsyscall(const char *level, struct pt_regs *regs,
diff --git a/arch/x86/vdso/vclock_gettime.c b/arch/x86/vdso/vclock_gettime.c
index 7eeb1f6188ee..944c5e5d6b6a 100644
--- a/arch/x86/vdso/vclock_gettime.c
+++ b/arch/x86/vdso/vclock_gettime.c
@@ -100,12 +100,12 @@ notrace static noinline int do_realtime(struct timespec *ts)
 	int mode;
 
 	do {
-		seq = read_seqbegin(&gtod->lock);
+		seq = read_seqcount_begin(&gtod->seq);
 		mode = gtod->clock.vclock_mode;
 		ts->tv_sec = gtod->wall_time_sec;
 		ts->tv_nsec = gtod->wall_time_nsec;
 		ns = vgetns();
-	} while (unlikely(read_seqretry(&gtod->lock, seq)));
+	} while (unlikely(read_seqcount_retry(&gtod->seq, seq)));
 
 	timespec_add_ns(ts, ns);
 	return mode;
@@ -117,13 +117,13 @@ notrace static noinline int do_monotonic(struct timespec *ts)
 	int mode;
 
 	do {
-		seq = read_seqbegin(&gtod->lock);
+		seq = read_seqcount_begin(&gtod->seq);
 		mode = gtod->clock.vclock_mode;
 		secs = gtod->wall_time_sec;
 		ns = gtod->wall_time_nsec + vgetns();
 		secs += gtod->wall_to_monotonic.tv_sec;
 		ns += gtod->wall_to_monotonic.tv_nsec;
-	} while (unlikely(read_seqretry(&gtod->lock, seq)));
+	} while (unlikely(read_seqcount_retry(&gtod->seq, seq)));
 
 	/* wall_time_nsec, vgetns(), and wall_to_monotonic.tv_nsec
 	 * are all guaranteed to be nonnegative.
@@ -142,10 +142,10 @@ notrace static noinline int do_realtime_coarse(struct timespec *ts)
 {
 	unsigned long seq;
 	do {
-		seq = read_seqbegin(&gtod->lock);
+		seq = read_seqcount_begin(&gtod->seq);
 		ts->tv_sec = gtod->wall_time_coarse.tv_sec;
 		ts->tv_nsec = gtod->wall_time_coarse.tv_nsec;
-	} while (unlikely(read_seqretry(&gtod->lock, seq)));
+	} while (unlikely(read_seqcount_retry(&gtod->seq, seq)));
 	return 0;
 }
 
@@ -153,12 +153,12 @@ notrace static noinline int do_monotonic_coarse(struct timespec *ts)
 {
 	unsigned long seq, ns, secs;
 	do {
-		seq = read_seqbegin(&gtod->lock);
+		seq = read_seqcount_begin(&gtod->seq);
 		secs = gtod->wall_time_coarse.tv_sec;
 		ns = gtod->wall_time_coarse.tv_nsec;
 		secs += gtod->wall_to_monotonic.tv_sec;
 		ns += gtod->wall_to_monotonic.tv_nsec;
-	} while (unlikely(read_seqretry(&gtod->lock, seq)));
+	} while (unlikely(read_seqcount_retry(&gtod->seq, seq)));
 
 	/* wall_time_nsec and wall_to_monotonic.tv_nsec are
 	 * guaranteed to be between 0 and NSEC_PER_SEC.