Diffstat (limited to 'kernel')
-rw-r--r--  kernel/kfifo.c            | 361
-rw-r--r--  kernel/resource.c         |  32
-rw-r--r--  kernel/time.c             |   1
-rw-r--r--  kernel/time/timekeeping.c |  27
4 files changed, 322 insertions(+), 99 deletions(-)
diff --git a/kernel/kfifo.c b/kernel/kfifo.c
index 3765ff3c1bbe..e92d519f93b1 100644
--- a/kernel/kfifo.c
+++ b/kernel/kfifo.c
@@ -1,6 +1,7 @@
 /*
- * A simple kernel FIFO implementation.
+ * A generic kernel FIFO implementation.
  *
+ * Copyright (C) 2009 Stefani Seibold <stefani@seibold.net>
  * Copyright (C) 2004 Stelian Pop <stelian@popies.net>
  *
  * This program is free software; you can redistribute it and/or modify
@@ -25,50 +26,48 @@
 #include <linux/err.h>
 #include <linux/kfifo.h>
 #include <linux/log2.h>
+#include <linux/uaccess.h>
+
+static void _kfifo_init(struct kfifo *fifo, unsigned char *buffer,
+		unsigned int size)
+{
+	fifo->buffer = buffer;
+	fifo->size = size;
+
+	kfifo_reset(fifo);
+}
 
 /**
- * kfifo_init - allocates a new FIFO using a preallocated buffer
+ * kfifo_init - initialize a FIFO using a preallocated buffer
+ * @fifo: the fifo to assign the buffer
  * @buffer: the preallocated buffer to be used.
  * @size: the size of the internal buffer, this have to be a power of 2.
- * @gfp_mask: get_free_pages mask, passed to kmalloc()
- * @lock: the lock to be used to protect the fifo buffer
  *
- * Do NOT pass the kfifo to kfifo_free() after use! Simply free the
- * &struct kfifo with kfree().
  */
-struct kfifo *kfifo_init(unsigned char *buffer, unsigned int size,
-		gfp_t gfp_mask, spinlock_t *lock)
+void kfifo_init(struct kfifo *fifo, unsigned char *buffer, unsigned int size)
 {
-	struct kfifo *fifo;
-
 	/* size must be a power of 2 */
 	BUG_ON(!is_power_of_2(size));
 
-	fifo = kmalloc(sizeof(struct kfifo), gfp_mask);
-	if (!fifo)
-		return ERR_PTR(-ENOMEM);
-
-	fifo->buffer = buffer;
-	fifo->size = size;
-	fifo->in = fifo->out = 0;
-	fifo->lock = lock;
-
-	return fifo;
+	_kfifo_init(fifo, buffer, size);
 }
 EXPORT_SYMBOL(kfifo_init);
 
 /**
- * kfifo_alloc - allocates a new FIFO and its internal buffer
- * @size: the size of the internal buffer to be allocated.
+ * kfifo_alloc - allocates a new FIFO internal buffer
+ * @fifo: the fifo to assign then new buffer
+ * @size: the size of the buffer to be allocated, this have to be a power of 2.
  * @gfp_mask: get_free_pages mask, passed to kmalloc()
- * @lock: the lock to be used to protect the fifo buffer
+ *
+ * This function dynamically allocates a new fifo internal buffer
  *
  * The size will be rounded-up to a power of 2.
+ * The buffer will be release with kfifo_free().
+ * Return 0 if no error, otherwise the an error code
  */
-struct kfifo *kfifo_alloc(unsigned int size, gfp_t gfp_mask, spinlock_t *lock)
+int kfifo_alloc(struct kfifo *fifo, unsigned int size, gfp_t gfp_mask)
 {
 	unsigned char *buffer;
-	struct kfifo *ret;
 
 	/*
 	 * round up to the next power of 2, since our 'let the indices
@@ -80,48 +79,91 @@ struct kfifo *kfifo_alloc(unsigned int size, gfp_t gfp_mask, spinlock_t *lock)
 	}
 
 	buffer = kmalloc(size, gfp_mask);
-	if (!buffer)
-		return ERR_PTR(-ENOMEM);
-
-	ret = kfifo_init(buffer, size, gfp_mask, lock);
+	if (!buffer) {
+		_kfifo_init(fifo, 0, 0);
+		return -ENOMEM;
+	}
 
-	if (IS_ERR(ret))
-		kfree(buffer);
+	_kfifo_init(fifo, buffer, size);
 
-	return ret;
+	return 0;
 }
 EXPORT_SYMBOL(kfifo_alloc);
 
 /**
- * kfifo_free - frees the FIFO
+ * kfifo_free - frees the FIFO internal buffer
  * @fifo: the fifo to be freed.
  */
 void kfifo_free(struct kfifo *fifo)
 {
 	kfree(fifo->buffer);
-	kfree(fifo);
 }
 EXPORT_SYMBOL(kfifo_free);
 
 /**
- * __kfifo_put - puts some data into the FIFO, no locking version
+ * kfifo_skip - skip output data
  * @fifo: the fifo to be used.
- * @buffer: the data to be added.
- * @len: the length of the data to be added.
- *
- * This function copies at most @len bytes from the @buffer into
- * the FIFO depending on the free space, and returns the number of
- * bytes copied.
- *
- * Note that with only one concurrent reader and one concurrent
- * writer, you don't need extra locking to use these functions.
+ * @len: number of bytes to skip
  */
-unsigned int __kfifo_put(struct kfifo *fifo,
-			const unsigned char *buffer, unsigned int len)
+void kfifo_skip(struct kfifo *fifo, unsigned int len)
+{
+	if (len < kfifo_len(fifo)) {
+		__kfifo_add_out(fifo, len);
+		return;
+	}
+	kfifo_reset_out(fifo);
+}
+EXPORT_SYMBOL(kfifo_skip);
+
+static inline void __kfifo_in_data(struct kfifo *fifo,
+		const void *from, unsigned int len, unsigned int off)
 {
 	unsigned int l;
 
-	len = min(len, fifo->size - fifo->in + fifo->out);
+	/*
+	 * Ensure that we sample the fifo->out index -before- we
+	 * start putting bytes into the kfifo.
+	 */
+
+	smp_mb();
+
+	off = __kfifo_off(fifo, fifo->in + off);
+
+	/* first put the data starting from fifo->in to buffer end */
+	l = min(len, fifo->size - off);
+	memcpy(fifo->buffer + off, from, l);
+
+	/* then put the rest (if any) at the beginning of the buffer */
+	memcpy(fifo->buffer, from + l, len - l);
+}
+
+static inline void __kfifo_out_data(struct kfifo *fifo,
+		void *to, unsigned int len, unsigned int off)
+{
+	unsigned int l;
+
+	/*
+	 * Ensure that we sample the fifo->in index -before- we
+	 * start removing bytes from the kfifo.
+	 */
+
+	smp_rmb();
+
+	off = __kfifo_off(fifo, fifo->out + off);
+
+	/* first get the data from fifo->out until the end of the buffer */
+	l = min(len, fifo->size - off);
+	memcpy(to, fifo->buffer + off, l);
+
+	/* then get the rest (if any) from the beginning of the buffer */
+	memcpy(to + l, fifo->buffer, len - l);
+}
+
+static inline unsigned int __kfifo_from_user_data(struct kfifo *fifo,
+	const void __user *from, unsigned int len, unsigned int off)
+{
+	unsigned int l;
+	int ret;
 
 	/*
 	 * Ensure that we sample the fifo->out index -before- we
@@ -130,68 +172,229 @@ unsigned int __kfifo_put(struct kfifo *fifo,
 
 	smp_mb();
 
+	off = __kfifo_off(fifo, fifo->in + off);
+
 	/* first put the data starting from fifo->in to buffer end */
-	l = min(len, fifo->size - (fifo->in & (fifo->size - 1)));
-	memcpy(fifo->buffer + (fifo->in & (fifo->size - 1)), buffer, l);
+	l = min(len, fifo->size - off);
+	ret = copy_from_user(fifo->buffer + off, from, l);
+
+	if (unlikely(ret))
+		return ret + len - l;
 
 	/* then put the rest (if any) at the beginning of the buffer */
-	memcpy(fifo->buffer, buffer + l, len - l);
+	return copy_from_user(fifo->buffer, from + l, len - l);
+}
+
+static inline unsigned int __kfifo_to_user_data(struct kfifo *fifo,
+	void __user *to, unsigned int len, unsigned int off)
+{
+	unsigned int l;
+	int ret;
 
 	/*
-	 * Ensure that we add the bytes to the kfifo -before-
-	 * we update the fifo->in index.
+	 * Ensure that we sample the fifo->in index -before- we
+	 * start removing bytes from the kfifo.
 	 */
 
-	smp_wmb();
+	smp_rmb();
+
+	off = __kfifo_off(fifo, fifo->out + off);
+
+	/* first get the data from fifo->out until the end of the buffer */
+	l = min(len, fifo->size - off);
+	ret = copy_to_user(to, fifo->buffer + off, l);
+
+	if (unlikely(ret))
+		return ret + len - l;
+
+	/* then get the rest (if any) from the beginning of the buffer */
+	return copy_to_user(to + l, fifo->buffer, len - l);
+}
+
+unsigned int __kfifo_in_n(struct kfifo *fifo,
+	const void *from, unsigned int len, unsigned int recsize)
+{
+	if (kfifo_avail(fifo) < len + recsize)
+		return len + 1;
+
+	__kfifo_in_data(fifo, from, len, recsize);
+	return 0;
+}
+EXPORT_SYMBOL(__kfifo_in_n);
 
-	fifo->in += len;
+/**
+ * kfifo_in - puts some data into the FIFO
+ * @fifo: the fifo to be used.
+ * @from: the data to be added.
+ * @len: the length of the data to be added.
+ *
+ * This function copies at most @len bytes from the @from buffer into
+ * the FIFO depending on the free space, and returns the number of
+ * bytes copied.
+ *
+ * Note that with only one concurrent reader and one concurrent
+ * writer, you don't need extra locking to use these functions.
+ */
+unsigned int kfifo_in(struct kfifo *fifo, const unsigned char *from,
+		unsigned int len)
+{
+	len = min(kfifo_avail(fifo), len);
 
+	__kfifo_in_data(fifo, from, len, 0);
+	__kfifo_add_in(fifo, len);
 	return len;
 }
-EXPORT_SYMBOL(__kfifo_put);
+EXPORT_SYMBOL(kfifo_in);
+
+unsigned int __kfifo_in_generic(struct kfifo *fifo,
+	const void *from, unsigned int len, unsigned int recsize)
+{
+	return __kfifo_in_rec(fifo, from, len, recsize);
+}
+EXPORT_SYMBOL(__kfifo_in_generic);
+
+unsigned int __kfifo_out_n(struct kfifo *fifo,
+	void *to, unsigned int len, unsigned int recsize)
+{
+	if (kfifo_len(fifo) < len + recsize)
+		return len;
+
+	__kfifo_out_data(fifo, to, len, recsize);
+	__kfifo_add_out(fifo, len + recsize);
+	return 0;
+}
+EXPORT_SYMBOL(__kfifo_out_n);
 
 /**
- * __kfifo_get - gets some data from the FIFO, no locking version
+ * kfifo_out - gets some data from the FIFO
  * @fifo: the fifo to be used.
- * @buffer: where the data must be copied.
+ * @to: where the data must be copied.
  * @len: the size of the destination buffer.
  *
  * This function copies at most @len bytes from the FIFO into the
- * @buffer and returns the number of copied bytes.
+ * @to buffer and returns the number of copied bytes.
  *
  * Note that with only one concurrent reader and one concurrent
  * writer, you don't need extra locking to use these functions.
  */
-unsigned int __kfifo_get(struct kfifo *fifo,
-			unsigned char *buffer, unsigned int len)
+unsigned int kfifo_out(struct kfifo *fifo, unsigned char *to, unsigned int len)
 {
-	unsigned int l;
+	len = min(kfifo_len(fifo), len);
 
-	len = min(len, fifo->in - fifo->out);
+	__kfifo_out_data(fifo, to, len, 0);
+	__kfifo_add_out(fifo, len);
 
-	/*
-	 * Ensure that we sample the fifo->in index -before- we
-	 * start removing bytes from the kfifo.
-	 */
+	return len;
+}
+EXPORT_SYMBOL(kfifo_out);
 
-	smp_rmb();
+unsigned int __kfifo_out_generic(struct kfifo *fifo,
+	void *to, unsigned int len, unsigned int recsize,
+	unsigned int *total)
+{
+	return __kfifo_out_rec(fifo, to, len, recsize, total);
+}
+EXPORT_SYMBOL(__kfifo_out_generic);
 
-	/* first get the data from fifo->out until the end of the buffer */
-	l = min(len, fifo->size - (fifo->out & (fifo->size - 1)));
-	memcpy(buffer, fifo->buffer + (fifo->out & (fifo->size - 1)), l);
+unsigned int __kfifo_from_user_n(struct kfifo *fifo,
+	const void __user *from, unsigned int len, unsigned int recsize)
+{
+	if (kfifo_avail(fifo) < len + recsize)
+		return len + 1;
 
-	/* then get the rest (if any) from the beginning of the buffer */
-	memcpy(buffer + l, fifo->buffer, len - l);
+	return __kfifo_from_user_data(fifo, from, len, recsize);
+}
+EXPORT_SYMBOL(__kfifo_from_user_n);
 
-	/*
-	 * Ensure that we remove the bytes from the kfifo -before-
-	 * we update the fifo->out index.
-	 */
+/**
+ * kfifo_from_user - puts some data from user space into the FIFO
+ * @fifo: the fifo to be used.
+ * @from: pointer to the data to be added.
+ * @len: the length of the data to be added.
+ *
+ * This function copies at most @len bytes from the @from into the
+ * FIFO depending and returns the number of copied bytes.
+ *
+ * Note that with only one concurrent reader and one concurrent
+ * writer, you don't need extra locking to use these functions.
+ */
+unsigned int kfifo_from_user(struct kfifo *fifo,
+	const void __user *from, unsigned int len)
+{
+	len = min(kfifo_avail(fifo), len);
+	len -= __kfifo_from_user_data(fifo, from, len, 0);
+	__kfifo_add_in(fifo, len);
+	return len;
+}
+EXPORT_SYMBOL(kfifo_from_user);
 
-	smp_mb();
+unsigned int __kfifo_from_user_generic(struct kfifo *fifo,
+	const void __user *from, unsigned int len, unsigned int recsize)
+{
+	return __kfifo_from_user_rec(fifo, from, len, recsize);
+}
+EXPORT_SYMBOL(__kfifo_from_user_generic);
 
-	fifo->out += len;
+unsigned int __kfifo_to_user_n(struct kfifo *fifo,
+	void __user *to, unsigned int len, unsigned int reclen,
+	unsigned int recsize)
+{
+	unsigned int ret;
+
+	if (kfifo_len(fifo) < reclen + recsize)
+		return len;
+
+	ret = __kfifo_to_user_data(fifo, to, reclen, recsize);
 
+	if (likely(ret == 0))
+		__kfifo_add_out(fifo, reclen + recsize);
+
+	return ret;
+}
+EXPORT_SYMBOL(__kfifo_to_user_n);
+
+/**
+ * kfifo_to_user - gets data from the FIFO and write it to user space
+ * @fifo: the fifo to be used.
+ * @to: where the data must be copied.
+ * @len: the size of the destination buffer.
+ *
+ * This function copies at most @len bytes from the FIFO into the
+ * @to buffer and returns the number of copied bytes.
+ *
+ * Note that with only one concurrent reader and one concurrent
+ * writer, you don't need extra locking to use these functions.
+ */
+unsigned int kfifo_to_user(struct kfifo *fifo,
+	void __user *to, unsigned int len)
+{
+	len = min(kfifo_len(fifo), len);
+	len -= __kfifo_to_user_data(fifo, to, len, 0);
+	__kfifo_add_out(fifo, len);
 	return len;
 }
-EXPORT_SYMBOL(__kfifo_get);
+EXPORT_SYMBOL(kfifo_to_user);
+
+unsigned int __kfifo_to_user_generic(struct kfifo *fifo,
+	void __user *to, unsigned int len, unsigned int recsize,
+	unsigned int *total)
+{
+	return __kfifo_to_user_rec(fifo, to, len, recsize, total);
+}
+EXPORT_SYMBOL(__kfifo_to_user_generic);
+
+unsigned int __kfifo_peek_generic(struct kfifo *fifo, unsigned int recsize)
+{
+	if (recsize == 0)
+		return kfifo_avail(fifo);
+
+	return __kfifo_peek_n(fifo, recsize);
+}
+EXPORT_SYMBOL(__kfifo_peek_generic);
+
+void __kfifo_skip_generic(struct kfifo *fifo, unsigned int recsize)
+{
+	__kfifo_skip_rec(fifo, recsize);
+}
+EXPORT_SYMBOL(__kfifo_skip_generic);
+
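The kfifo portion of this diff changes the calling convention: the FIFO is now embedded in its owning object and initialized in place, kfifo_alloc() returns 0 or -ENOMEM instead of a struct kfifo pointer, and the spinlock argument is gone, so callers serialize access themselves. A minimal usage sketch against the new signatures shown above; the surrounding my_dev structure and buffer sizes are illustrative, not part of the patch:

#include <linux/gfp.h>
#include <linux/kfifo.h>

/* hypothetical owner object; only the kfifo calls follow the patch */
struct my_dev {
	struct kfifo fifo;		/* embedded in place, no separate kmalloc() */
	unsigned char scratch[64];
};

static int my_dev_setup(struct my_dev *dev)
{
	/* new API: an int error code instead of ERR_PTR(); size a power of 2 */
	return kfifo_alloc(&dev->fifo, 4096, GFP_KERNEL);
}

static void my_dev_move_bytes(struct my_dev *dev)
{
	unsigned int copied;

	/* the caller provides any locking; the fifo no longer carries a lock */
	copied = kfifo_in(&dev->fifo, dev->scratch, sizeof(dev->scratch));
	copied = kfifo_out(&dev->fifo, dev->scratch, copied);
}

static void my_dev_teardown(struct my_dev *dev)
{
	kfifo_free(&dev->fifo);		/* frees only the internal buffer now */
}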
diff --git a/kernel/resource.c b/kernel/resource.c
index dc15686b7a77..af96c1e4b54b 100644
--- a/kernel/resource.c
+++ b/kernel/resource.c
@@ -308,37 +308,37 @@ static int find_resource(struct resource *root, struct resource *new,
 			 void *alignf_data)
 {
 	struct resource *this = root->child;
-	resource_size_t start, end;
+	struct resource tmp = *new;
 
-	start = root->start;
+	tmp.start = root->start;
 	/*
 	 * Skip past an allocated resource that starts at 0, since the assignment
-	 * of this->start - 1 to new->end below would cause an underflow.
+	 * of this->start - 1 to tmp->end below would cause an underflow.
 	 */
 	if (this && this->start == 0) {
-		start = this->end + 1;
+		tmp.start = this->end + 1;
 		this = this->sibling;
 	}
 	for(;;) {
 		if (this)
-			end = this->start - 1;
+			tmp.end = this->start - 1;
 		else
-			end = root->end;
-		if (start < min)
-			start = min;
-		if (end > max)
-			end = max;
-		start = ALIGN(start, align);
+			tmp.end = root->end;
+		if (tmp.start < min)
+			tmp.start = min;
+		if (tmp.end > max)
+			tmp.end = max;
+		tmp.start = ALIGN(tmp.start, align);
 		if (alignf)
-			alignf(alignf_data, new, size, align);
-		if (start < end && end - start >= size - 1) {
-			new->start = start;
-			new->end = start + size - 1;
+			alignf(alignf_data, &tmp, size, align);
+		if (tmp.start < tmp.end && tmp.end - tmp.start >= size - 1) {
+			new->start = tmp.start;
+			new->end = tmp.start + size - 1;
 			return 0;
 		}
 		if (!this)
 			break;
-		start = this->end + 1;
+		tmp.start = this->end + 1;
 		this = this->sibling;
 	}
 	return -EBUSY;
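The find_resource() change above is a commit-on-success pattern: all candidate arithmetic happens in a stack copy (tmp), so the alignf callback sees a complete struct resource and a failed search returns -EBUSY with *new untouched. A stripped-down illustration of the same pattern with simplified types (plain C, not kernel code):

#include <stdbool.h>

struct range { unsigned long start, end; };

/* search [limit.start, limit.end] for size bytes; write *out only on success */
static bool find_window(struct range *out, struct range limit,
			unsigned long size)
{
	struct range tmp = *out;	/* scratch copy, as find_resource() now uses */

	tmp.start = limit.start;
	tmp.end = limit.end;
	if (tmp.start < tmp.end && tmp.end - tmp.start >= size - 1) {
		out->start = tmp.start;	/* commit only the winning candidate */
		out->end = tmp.start + size - 1;
		return true;
	}
	return false;			/* failure leaves *out unchanged */
}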
diff --git a/kernel/time.c b/kernel/time.c
index c6324d96009e..804798005d19 100644
--- a/kernel/time.c
+++ b/kernel/time.c
@@ -136,6 +136,7 @@ static inline void warp_clock(void)
 	write_seqlock_irq(&xtime_lock);
 	wall_to_monotonic.tv_sec -= sys_tz.tz_minuteswest * 60;
 	xtime.tv_sec += sys_tz.tz_minuteswest * 60;
+	update_xtime_cache(0);
 	write_sequnlock_irq(&xtime_lock);
 	clock_was_set();
 }
diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
index af4135f05825..7faaa32fbf4f 100644
--- a/kernel/time/timekeeping.c
+++ b/kernel/time/timekeeping.c
@@ -165,6 +165,13 @@ struct timespec raw_time;
 /* flag for if timekeeping is suspended */
 int __read_mostly timekeeping_suspended;
 
+static struct timespec xtime_cache __attribute__ ((aligned (16)));
+void update_xtime_cache(u64 nsec)
+{
+	xtime_cache = xtime;
+	timespec_add_ns(&xtime_cache, nsec);
+}
+
 /* must hold xtime_lock */
 void timekeeping_leap_insert(int leapsecond)
 {
@@ -325,6 +332,8 @@ int do_settimeofday(struct timespec *tv)
 
 	xtime = *tv;
 
+	update_xtime_cache(0);
+
 	timekeeper.ntp_error = 0;
 	ntp_clear();
 
@@ -550,6 +559,7 @@ void __init timekeeping_init(void)
 	}
 	set_normalized_timespec(&wall_to_monotonic,
 				-boot.tv_sec, -boot.tv_nsec);
+	update_xtime_cache(0);
 	total_sleep_time.tv_sec = 0;
 	total_sleep_time.tv_nsec = 0;
 	write_sequnlock_irqrestore(&xtime_lock, flags);
@@ -583,6 +593,7 @@ static int timekeeping_resume(struct sys_device *dev)
 		wall_to_monotonic = timespec_sub(wall_to_monotonic, ts);
 		total_sleep_time = timespec_add_safe(total_sleep_time, ts);
 	}
+	update_xtime_cache(0);
 	/* re-base the last cycle value */
 	timekeeper.clock->cycle_last = timekeeper.clock->read(timekeeper.clock);
 	timekeeper.ntp_error = 0;
@@ -722,6 +733,7 @@ static void timekeeping_adjust(s64 offset)
 				timekeeper.ntp_error_shift;
 }
 
+
 /**
  * logarithmic_accumulation - shifted accumulation of cycles
  *
@@ -765,6 +777,7 @@ static cycle_t logarithmic_accumulation(cycle_t offset, int shift)
 	return offset;
 }
 
+
 /**
  * update_wall_time - Uses the current clocksource to increment the wall time
  *
@@ -774,6 +787,7 @@ void update_wall_time(void)
 {
 	struct clocksource *clock;
 	cycle_t offset;
+	u64 nsecs;
 	int shift = 0, maxshift;
 
 	/* Make sure we're fully resumed: */
@@ -839,6 +853,9 @@ void update_wall_time(void)
 	timekeeper.ntp_error += timekeeper.xtime_nsec <<
 				timekeeper.ntp_error_shift;
 
+	nsecs = clocksource_cyc2ns(offset, timekeeper.mult, timekeeper.shift);
+	update_xtime_cache(nsecs);
+
 	/* check to see if there is a new clocksource to use */
 	update_vsyscall(&xtime, timekeeper.clock, timekeeper.mult);
 }
@@ -875,13 +892,13 @@ void monotonic_to_bootbased(struct timespec *ts)
 
 unsigned long get_seconds(void)
 {
-	return xtime.tv_sec;
+	return xtime_cache.tv_sec;
 }
 EXPORT_SYMBOL(get_seconds);
 
 struct timespec __current_kernel_time(void)
 {
-	return xtime;
+	return xtime_cache;
 }
 
 struct timespec current_kernel_time(void)
@@ -891,7 +908,8 @@ struct timespec current_kernel_time(void)
 
 	do {
 		seq = read_seqbegin(&xtime_lock);
-		now = xtime;
+
+		now = xtime_cache;
 	} while (read_seqretry(&xtime_lock, seq));
 
 	return now;
@@ -905,7 +923,8 @@ struct timespec get_monotonic_coarse(void)
 
 	do {
 		seq = read_seqbegin(&xtime_lock);
-		now = xtime;
+
+		now = xtime_cache;
 		mono = wall_to_monotonic;
 	} while (read_seqretry(&xtime_lock, seq));
 
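The timekeeping hunks reintroduce xtime_cache as a pre-computed coarse snapshot: every writer that touches xtime under xtime_lock refreshes the cache (warp_clock() in kernel/time.c above does the same), update_wall_time() folds in the not-yet-accumulated nanoseconds via clocksource_cyc2ns(), and the coarse readers return the cache instead of raw xtime. Both sides follow the usual seqlock discipline; a sketch of the writer/reader pairing, reusing the kernel-internal names visible in the diff (a sketch only, not part of the patch):

/* writer side: holds xtime_lock and refreshes the snapshot after an update */
static void set_wall_time(struct timespec tv)
{
	write_seqlock_irq(&xtime_lock);
	xtime = tv;
	update_xtime_cache(0);		/* keep coarse readers coherent */
	write_sequnlock_irq(&xtime_lock);
}

/* reader side: seqlock retry loop, as in current_kernel_time() above */
static struct timespec coarse_time(void)
{
	struct timespec now;
	unsigned long seq;

	do {
		seq = read_seqbegin(&xtime_lock);
		now = xtime_cache;	/* pre-computed, no clocksource read */
	} while (read_seqretry(&xtime_lock, seq));

	return now;
}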