Diffstat (limited to 'kernel')

 -rw-r--r--  kernel/auditsc.c            |   1
 -rw-r--r--  kernel/kfifo.c              | 361
 -rw-r--r--  kernel/perf_event.c         |   2
 -rw-r--r--  kernel/resource.c           |  32
 -rw-r--r--  kernel/sched.c              |  91
 -rw-r--r--  kernel/sched_idletask.c     |   2
 -rw-r--r--  kernel/time.c               |   1
 -rw-r--r--  kernel/time/timekeeping.c   |  27

 8 files changed, 374 insertions(+), 143 deletions(-)
diff --git a/kernel/auditsc.c b/kernel/auditsc.c
index 267e484f0198..fc0f928167e7 100644
--- a/kernel/auditsc.c
+++ b/kernel/auditsc.c
@@ -250,7 +250,6 @@ struct audit_context {
 #endif
 };
 
-#define ACC_MODE(x) ("\004\002\006\006"[(x)&O_ACCMODE])
 static inline int open_arg(int flags, int mask)
 {
         int n = ACC_MODE(flags);
diff --git a/kernel/kfifo.c b/kernel/kfifo.c
index 3765ff3c1bbe..e92d519f93b1 100644
--- a/kernel/kfifo.c
+++ b/kernel/kfifo.c
@@ -1,6 +1,7 @@
 /*
- * A simple kernel FIFO implementation.
+ * A generic kernel FIFO implementation.
  *
+ * Copyright (C) 2009 Stefani Seibold <stefani@seibold.net>
  * Copyright (C) 2004 Stelian Pop <stelian@popies.net>
  *
  * This program is free software; you can redistribute it and/or modify
@@ -25,50 +26,48 @@
 #include <linux/err.h>
 #include <linux/kfifo.h>
 #include <linux/log2.h>
+#include <linux/uaccess.h>
+
+static void _kfifo_init(struct kfifo *fifo, unsigned char *buffer,
+                unsigned int size)
+{
+        fifo->buffer = buffer;
+        fifo->size = size;
+
+        kfifo_reset(fifo);
+}
 
 /**
- * kfifo_init - allocates a new FIFO using a preallocated buffer
+ * kfifo_init - initialize a FIFO using a preallocated buffer
+ * @fifo: the fifo to assign the buffer
  * @buffer: the preallocated buffer to be used.
  * @size: the size of the internal buffer, this have to be a power of 2.
- * @gfp_mask: get_free_pages mask, passed to kmalloc()
- * @lock: the lock to be used to protect the fifo buffer
  *
- * Do NOT pass the kfifo to kfifo_free() after use! Simply free the
- * &struct kfifo with kfree().
  */
-struct kfifo *kfifo_init(unsigned char *buffer, unsigned int size,
-                gfp_t gfp_mask, spinlock_t *lock)
+void kfifo_init(struct kfifo *fifo, unsigned char *buffer, unsigned int size)
 {
-        struct kfifo *fifo;
-
         /* size must be a power of 2 */
         BUG_ON(!is_power_of_2(size));
 
-        fifo = kmalloc(sizeof(struct kfifo), gfp_mask);
-        if (!fifo)
-                return ERR_PTR(-ENOMEM);
-
-        fifo->buffer = buffer;
-        fifo->size = size;
-        fifo->in = fifo->out = 0;
-        fifo->lock = lock;
-
-        return fifo;
+        _kfifo_init(fifo, buffer, size);
 }
 EXPORT_SYMBOL(kfifo_init);
 
 /**
- * kfifo_alloc - allocates a new FIFO and its internal buffer
- * @size: the size of the internal buffer to be allocated.
+ * kfifo_alloc - allocates a new FIFO internal buffer
+ * @fifo: the fifo to assign then new buffer
+ * @size: the size of the buffer to be allocated, this have to be a power of 2.
  * @gfp_mask: get_free_pages mask, passed to kmalloc()
- * @lock: the lock to be used to protect the fifo buffer
+ *
+ * This function dynamically allocates a new fifo internal buffer
  *
  * The size will be rounded-up to a power of 2.
+ * The buffer will be release with kfifo_free().
+ * Return 0 if no error, otherwise the an error code
  */
-struct kfifo *kfifo_alloc(unsigned int size, gfp_t gfp_mask, spinlock_t *lock)
+int kfifo_alloc(struct kfifo *fifo, unsigned int size, gfp_t gfp_mask)
 {
         unsigned char *buffer;
-        struct kfifo *ret;
 
         /*
          * round up to the next power of 2, since our 'let the indices
@@ -80,48 +79,91 @@ struct kfifo *kfifo_alloc(unsigned int size, gfp_t gfp_mask, spinlock_t *lock)
 }
 
         buffer = kmalloc(size, gfp_mask);
-        if (!buffer)
-                return ERR_PTR(-ENOMEM);
-
-        ret = kfifo_init(buffer, size, gfp_mask, lock);
+        if (!buffer) {
+                _kfifo_init(fifo, 0, 0);
+                return -ENOMEM;
+        }
 
-        if (IS_ERR(ret))
-                kfree(buffer);
+        _kfifo_init(fifo, buffer, size);
 
-        return ret;
+        return 0;
 }
 EXPORT_SYMBOL(kfifo_alloc);
 
 /**
- * kfifo_free - frees the FIFO
+ * kfifo_free - frees the FIFO internal buffer
  * @fifo: the fifo to be freed.
  */
 void kfifo_free(struct kfifo *fifo)
 {
         kfree(fifo->buffer);
-        kfree(fifo);
 }
 EXPORT_SYMBOL(kfifo_free);
 
 /**
- * __kfifo_put - puts some data into the FIFO, no locking version
+ * kfifo_skip - skip output data
  * @fifo: the fifo to be used.
- * @buffer: the data to be added.
- * @len: the length of the data to be added.
- *
- * This function copies at most @len bytes from the @buffer into
- * the FIFO depending on the free space, and returns the number of
- * bytes copied.
- *
- * Note that with only one concurrent reader and one concurrent
- * writer, you don't need extra locking to use these functions.
+ * @len: number of bytes to skip
  */
-unsigned int __kfifo_put(struct kfifo *fifo,
-                const unsigned char *buffer, unsigned int len)
+void kfifo_skip(struct kfifo *fifo, unsigned int len)
+{
+        if (len < kfifo_len(fifo)) {
+                __kfifo_add_out(fifo, len);
+                return;
+        }
+        kfifo_reset_out(fifo);
+}
+EXPORT_SYMBOL(kfifo_skip);
+
+static inline void __kfifo_in_data(struct kfifo *fifo,
+                const void *from, unsigned int len, unsigned int off)
 {
         unsigned int l;
 
-        len = min(len, fifo->size - fifo->in + fifo->out);
+        /*
+         * Ensure that we sample the fifo->out index -before- we
+         * start putting bytes into the kfifo.
+         */
+
+        smp_mb();
+
+        off = __kfifo_off(fifo, fifo->in + off);
+
+        /* first put the data starting from fifo->in to buffer end */
+        l = min(len, fifo->size - off);
+        memcpy(fifo->buffer + off, from, l);
+
+        /* then put the rest (if any) at the beginning of the buffer */
+        memcpy(fifo->buffer, from + l, len - l);
+}
+
+static inline void __kfifo_out_data(struct kfifo *fifo,
+                void *to, unsigned int len, unsigned int off)
+{
+        unsigned int l;
+
+        /*
+         * Ensure that we sample the fifo->in index -before- we
+         * start removing bytes from the kfifo.
+         */
+
+        smp_rmb();
+
+        off = __kfifo_off(fifo, fifo->out + off);
+
+        /* first get the data from fifo->out until the end of the buffer */
+        l = min(len, fifo->size - off);
+        memcpy(to, fifo->buffer + off, l);
+
+        /* then get the rest (if any) from the beginning of the buffer */
+        memcpy(to + l, fifo->buffer, len - l);
+}
+
+static inline unsigned int __kfifo_from_user_data(struct kfifo *fifo,
+                const void __user *from, unsigned int len, unsigned int off)
+{
+        unsigned int l;
+        int ret;
 
         /*
          * Ensure that we sample the fifo->out index -before- we
@@ -130,68 +172,229 @@ unsigned int __kfifo_put(struct kfifo *fifo,
 
         smp_mb();
 
+        off = __kfifo_off(fifo, fifo->in + off);
+
         /* first put the data starting from fifo->in to buffer end */
-        l = min(len, fifo->size - (fifo->in & (fifo->size - 1)));
-        memcpy(fifo->buffer + (fifo->in & (fifo->size - 1)), buffer, l);
+        l = min(len, fifo->size - off);
+        ret = copy_from_user(fifo->buffer + off, from, l);
+
+        if (unlikely(ret))
+                return ret + len - l;
 
         /* then put the rest (if any) at the beginning of the buffer */
-        memcpy(fifo->buffer, buffer + l, len - l);
+        return copy_from_user(fifo->buffer, from + l, len - l);
+}
+
+static inline unsigned int __kfifo_to_user_data(struct kfifo *fifo,
+        void __user *to, unsigned int len, unsigned int off)
+{
+        unsigned int l;
+        int ret;
 
         /*
-         * Ensure that we add the bytes to the kfifo -before-
-         * we update the fifo->in index.
+         * Ensure that we sample the fifo->in index -before- we
+         * start removing bytes from the kfifo.
          */
 
-        smp_wmb();
+        smp_rmb();
+
+        off = __kfifo_off(fifo, fifo->out + off);
+
+        /* first get the data from fifo->out until the end of the buffer */
+        l = min(len, fifo->size - off);
+        ret = copy_to_user(to, fifo->buffer + off, l);
+
+        if (unlikely(ret))
+                return ret + len - l;
+
+        /* then get the rest (if any) from the beginning of the buffer */
+        return copy_to_user(to + l, fifo->buffer, len - l);
+}
+
+unsigned int __kfifo_in_n(struct kfifo *fifo,
+        const void *from, unsigned int len, unsigned int recsize)
+{
+        if (kfifo_avail(fifo) < len + recsize)
+                return len + 1;
+
+        __kfifo_in_data(fifo, from, len, recsize);
+        return 0;
+}
+EXPORT_SYMBOL(__kfifo_in_n);
 
-        fifo->in += len;
+/**
+ * kfifo_in - puts some data into the FIFO
+ * @fifo: the fifo to be used.
+ * @from: the data to be added.
+ * @len: the length of the data to be added.
+ *
+ * This function copies at most @len bytes from the @from buffer into
+ * the FIFO depending on the free space, and returns the number of
+ * bytes copied.
+ *
+ * Note that with only one concurrent reader and one concurrent
+ * writer, you don't need extra locking to use these functions.
+ */
+unsigned int kfifo_in(struct kfifo *fifo, const unsigned char *from,
+                unsigned int len)
+{
+        len = min(kfifo_avail(fifo), len);
 
+        __kfifo_in_data(fifo, from, len, 0);
+        __kfifo_add_in(fifo, len);
         return len;
 }
-EXPORT_SYMBOL(__kfifo_put);
+EXPORT_SYMBOL(kfifo_in);
+
+unsigned int __kfifo_in_generic(struct kfifo *fifo,
+        const void *from, unsigned int len, unsigned int recsize)
+{
+        return __kfifo_in_rec(fifo, from, len, recsize);
+}
+EXPORT_SYMBOL(__kfifo_in_generic);
+
+unsigned int __kfifo_out_n(struct kfifo *fifo,
+        void *to, unsigned int len, unsigned int recsize)
+{
+        if (kfifo_len(fifo) < len + recsize)
+                return len;
+
+        __kfifo_out_data(fifo, to, len, recsize);
+        __kfifo_add_out(fifo, len + recsize);
+        return 0;
+}
+EXPORT_SYMBOL(__kfifo_out_n);
 
 /**
- * __kfifo_get - gets some data from the FIFO, no locking version
+ * kfifo_out - gets some data from the FIFO
  * @fifo: the fifo to be used.
- * @buffer: where the data must be copied.
+ * @to: where the data must be copied.
  * @len: the size of the destination buffer.
  *
  * This function copies at most @len bytes from the FIFO into the
- * @buffer and returns the number of copied bytes.
+ * @to buffer and returns the number of copied bytes.
  *
  * Note that with only one concurrent reader and one concurrent
 * writer, you don't need extra locking to use these functions.
  */
-unsigned int __kfifo_get(struct kfifo *fifo,
-                unsigned char *buffer, unsigned int len)
+unsigned int kfifo_out(struct kfifo *fifo, unsigned char *to, unsigned int len)
 {
-        unsigned int l;
+        len = min(kfifo_len(fifo), len);
 
-        len = min(len, fifo->in - fifo->out);
+        __kfifo_out_data(fifo, to, len, 0);
+        __kfifo_add_out(fifo, len);
 
-        /*
-         * Ensure that we sample the fifo->in index -before- we
-         * start removing bytes from the kfifo.
-         */
+        return len;
+}
+EXPORT_SYMBOL(kfifo_out);
 
-        smp_rmb();
+unsigned int __kfifo_out_generic(struct kfifo *fifo,
+        void *to, unsigned int len, unsigned int recsize,
+        unsigned int *total)
+{
+        return __kfifo_out_rec(fifo, to, len, recsize, total);
+}
+EXPORT_SYMBOL(__kfifo_out_generic);
 
-        /* first get the data from fifo->out until the end of the buffer */
-        l = min(len, fifo->size - (fifo->out & (fifo->size - 1)));
-        memcpy(buffer, fifo->buffer + (fifo->out & (fifo->size - 1)), l);
+unsigned int __kfifo_from_user_n(struct kfifo *fifo,
+        const void __user *from, unsigned int len, unsigned int recsize)
+{
+        if (kfifo_avail(fifo) < len + recsize)
+                return len + 1;
 
-        /* then get the rest (if any) from the beginning of the buffer */
-        memcpy(buffer + l, fifo->buffer, len - l);
+        return __kfifo_from_user_data(fifo, from, len, recsize);
+}
+EXPORT_SYMBOL(__kfifo_from_user_n);
 
-        /*
-         * Ensure that we remove the bytes from the kfifo -before-
-         * we update the fifo->out index.
-         */
+/**
+ * kfifo_from_user - puts some data from user space into the FIFO
+ * @fifo: the fifo to be used.
+ * @from: pointer to the data to be added.
+ * @len: the length of the data to be added.
+ *
+ * This function copies at most @len bytes from the @from into the
+ * FIFO depending and returns the number of copied bytes.
+ *
+ * Note that with only one concurrent reader and one concurrent
+ * writer, you don't need extra locking to use these functions.
+ */
+unsigned int kfifo_from_user(struct kfifo *fifo,
+        const void __user *from, unsigned int len)
+{
+        len = min(kfifo_avail(fifo), len);
+        len -= __kfifo_from_user_data(fifo, from, len, 0);
+        __kfifo_add_in(fifo, len);
+        return len;
+}
+EXPORT_SYMBOL(kfifo_from_user);
 
-        smp_mb();
+unsigned int __kfifo_from_user_generic(struct kfifo *fifo,
+        const void __user *from, unsigned int len, unsigned int recsize)
+{
+        return __kfifo_from_user_rec(fifo, from, len, recsize);
+}
+EXPORT_SYMBOL(__kfifo_from_user_generic);
 
-        fifo->out += len;
+unsigned int __kfifo_to_user_n(struct kfifo *fifo,
+        void __user *to, unsigned int len, unsigned int reclen,
+        unsigned int recsize)
+{
+        unsigned int ret;
+
+        if (kfifo_len(fifo) < reclen + recsize)
+                return len;
+
+        ret = __kfifo_to_user_data(fifo, to, reclen, recsize);
 
+        if (likely(ret == 0))
+                __kfifo_add_out(fifo, reclen + recsize);
+
+        return ret;
+}
+EXPORT_SYMBOL(__kfifo_to_user_n);
+
+/**
+ * kfifo_to_user - gets data from the FIFO and write it to user space
+ * @fifo: the fifo to be used.
+ * @to: where the data must be copied.
+ * @len: the size of the destination buffer.
+ *
+ * This function copies at most @len bytes from the FIFO into the
+ * @to buffer and returns the number of copied bytes.
+ *
+ * Note that with only one concurrent reader and one concurrent
+ * writer, you don't need extra locking to use these functions.
+ */
+unsigned int kfifo_to_user(struct kfifo *fifo,
+        void __user *to, unsigned int len)
+{
+        len = min(kfifo_len(fifo), len);
+        len -= __kfifo_to_user_data(fifo, to, len, 0);
+        __kfifo_add_out(fifo, len);
         return len;
 }
-EXPORT_SYMBOL(__kfifo_get);
+EXPORT_SYMBOL(kfifo_to_user);
+
+unsigned int __kfifo_to_user_generic(struct kfifo *fifo,
+        void __user *to, unsigned int len, unsigned int recsize,
+        unsigned int *total)
+{
+        return __kfifo_to_user_rec(fifo, to, len, recsize, total);
+}
+EXPORT_SYMBOL(__kfifo_to_user_generic);
+
+unsigned int __kfifo_peek_generic(struct kfifo *fifo, unsigned int recsize)
+{
+        if (recsize == 0)
+                return kfifo_avail(fifo);
+
+        return __kfifo_peek_n(fifo, recsize);
+}
+EXPORT_SYMBOL(__kfifo_peek_generic);
+
+void __kfifo_skip_generic(struct kfifo *fifo, unsigned int recsize)
+{
+        __kfifo_skip_rec(fifo, recsize);
+}
+EXPORT_SYMBOL(__kfifo_skip_generic);
+
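For context, a minimal sketch of how a caller would use the reworked kfifo API above: the struct kfifo is now owned by the caller instead of being returned from kfifo_init()/kfifo_alloc(), and the spinlock argument is gone, so locking between multiple writers or multiple readers is the caller's responsibility. Only kfifo_alloc(), kfifo_in(), kfifo_out() and kfifo_free() are taken from this diff; the buffer size, payload and function names below are illustrative assumptions.

        #include <linux/kfifo.h>
        #include <linux/slab.h>

        static struct kfifo example_fifo;       /* caller owns the struct now */

        static int example_setup(void)
        {
                /* allocates the internal buffer; the size is rounded up to a power of 2 */
                return kfifo_alloc(&example_fifo, 4096, GFP_KERNEL);
        }

        static void example_use(void)
        {
                unsigned char msg[] = "hello";
                unsigned char out[8];
                unsigned int copied;

                copied = kfifo_in(&example_fifo, msg, sizeof(msg));   /* bounded by free space */
                copied = kfifo_out(&example_fifo, out, sizeof(out));  /* bounded by kfifo_len() */
        }

        static void example_teardown(void)
        {
                kfifo_free(&example_fifo);      /* frees only the internal buffer */
        }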
diff --git a/kernel/perf_event.c b/kernel/perf_event.c
index e0eb4a2fe183..1f38270f08c7 100644
--- a/kernel/perf_event.c
+++ b/kernel/perf_event.c
@@ -4724,7 +4724,7 @@ SYSCALL_DEFINE5(perf_event_open,
         if (IS_ERR(event))
                 goto err_put_context;
 
-        err = anon_inode_getfd("[perf_event]", &perf_fops, event, 0);
+        err = anon_inode_getfd("[perf_event]", &perf_fops, event, O_RDWR);
         if (err < 0)
                 goto err_free_put_context;
 
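The one-line change above makes the anonymous perf fd carry O_RDWR instead of access mode 0. A hedged userspace sketch of how that is observable (the perf_event_attr setup here is a minimal assumption, not taken from the patch):

        #define _GNU_SOURCE
        #include <fcntl.h>
        #include <stdio.h>
        #include <string.h>
        #include <sys/syscall.h>
        #include <unistd.h>
        #include <linux/perf_event.h>

        int main(void)
        {
                struct perf_event_attr attr;
                int fd, flags;

                memset(&attr, 0, sizeof(attr));
                attr.size = sizeof(attr);
                attr.type = PERF_TYPE_SOFTWARE;
                attr.config = PERF_COUNT_SW_CPU_CLOCK;

                /* measure the current task on any CPU */
                fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
                if (fd < 0) {
                        perror("perf_event_open");
                        return 1;
                }

                flags = fcntl(fd, F_GETFL);
                printf("access mode: %s\n",
                       (flags & O_ACCMODE) == O_RDWR ? "O_RDWR" : "not O_RDWR");
                close(fd);
                return 0;
        }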
diff --git a/kernel/resource.c b/kernel/resource.c
index dc15686b7a77..af96c1e4b54b 100644
--- a/kernel/resource.c
+++ b/kernel/resource.c
@@ -308,37 +308,37 @@ static int find_resource(struct resource *root, struct resource *new,
                          void *alignf_data)
 {
         struct resource *this = root->child;
-        resource_size_t start, end;
+        struct resource tmp = *new;
 
-        start = root->start;
+        tmp.start = root->start;
         /*
          * Skip past an allocated resource that starts at 0, since the assignment
-         * of this->start - 1 to new->end below would cause an underflow.
+         * of this->start - 1 to tmp->end below would cause an underflow.
          */
         if (this && this->start == 0) {
-                start = this->end + 1;
+                tmp.start = this->end + 1;
                 this = this->sibling;
         }
         for(;;) {
                 if (this)
-                        end = this->start - 1;
+                        tmp.end = this->start - 1;
                 else
-                        end = root->end;
-                if (start < min)
-                        start = min;
-                if (end > max)
-                        end = max;
-                start = ALIGN(start, align);
+                        tmp.end = root->end;
+                if (tmp.start < min)
+                        tmp.start = min;
+                if (tmp.end > max)
+                        tmp.end = max;
+                tmp.start = ALIGN(tmp.start, align);
                 if (alignf)
-                        alignf(alignf_data, new, size, align);
-                if (start < end && end - start >= size - 1) {
-                        new->start = start;
-                        new->end = start + size - 1;
+                        alignf(alignf_data, &tmp, size, align);
+                if (tmp.start < tmp.end && tmp.end - tmp.start >= size - 1) {
+                        new->start = tmp.start;
+                        new->end = tmp.start + size - 1;
                         return 0;
                 }
                 if (!this)
                         break;
-                start = this->end + 1;
+                tmp.start = this->end + 1;
                 this = this->sibling;
         }
         return -EBUSY;
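The hunk above makes find_resource() carry the candidate window in a local struct resource tmp: the alignf callback now sees the full candidate resource, and the caller's *new appears to be written only once a window actually fits, so it is left untouched on -EBUSY. A hedged sketch of a callback matching the prototype implied by the call site alignf(alignf_data, &tmp, size, align); the function name and the 1 MiB rounding are purely illustrative assumptions:

        #include <linux/ioport.h>
        #include <linux/kernel.h>

        /* Prototype inferred from the call site in the hunk: the callback
         * may nudge res->start when the bus needs extra alignment. */
        static void example_alignf(void *data, struct resource *res,
                                   resource_size_t size, resource_size_t align)
        {
                /* illustrative only: force candidates onto 1 MiB boundaries */
                res->start = ALIGN(res->start, 0x100000);
        }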
diff --git a/kernel/sched.c b/kernel/sched.c
index 720df108a2d6..87f1f47beffe 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -26,8 +26,6 @@
  *  Thomas Gleixner, Mike Kravetz
  */
 
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
 #include <linux/mm.h>
 #include <linux/module.h>
 #include <linux/nmi.h>
@@ -2348,7 +2346,7 @@ int select_task_rq(struct task_struct *p, int sd_flags, int wake_flags)
          *   not worry about this generic constraint ]
          */
         if (unlikely(!cpumask_test_cpu(cpu, &p->cpus_allowed) ||
-                     !cpu_active(cpu)))
+                     !cpu_online(cpu)))
                 cpu = select_fallback_rq(task_cpu(p), p);
 
         return cpu;
@@ -5375,8 +5373,8 @@ static noinline void __schedule_bug(struct task_struct *prev)
 {
         struct pt_regs *regs = get_irq_regs();
 
-        pr_err("BUG: scheduling while atomic: %s/%d/0x%08x\n",
-               prev->comm, prev->pid, preempt_count());
+        printk(KERN_ERR "BUG: scheduling while atomic: %s/%d/0x%08x\n",
+                prev->comm, prev->pid, preempt_count());
 
         debug_show_held_locks(prev);
         print_modules();
@@ -6940,23 +6938,23 @@ void sched_show_task(struct task_struct *p)
         unsigned state;
 
         state = p->state ? __ffs(p->state) + 1 : 0;
-        pr_info("%-13.13s %c", p->comm,
+        printk(KERN_INFO "%-13.13s %c", p->comm,
                 state < sizeof(stat_nam) - 1 ? stat_nam[state] : '?');
 #if BITS_PER_LONG == 32
         if (state == TASK_RUNNING)
-                pr_cont(" running ");
+                printk(KERN_CONT " running ");
         else
-                pr_cont(" %08lx ", thread_saved_pc(p));
+                printk(KERN_CONT " %08lx ", thread_saved_pc(p));
 #else
         if (state == TASK_RUNNING)
-                pr_cont(" running task ");
+                printk(KERN_CONT " running task ");
         else
-                pr_cont(" %016lx ", thread_saved_pc(p));
+                printk(KERN_CONT " %016lx ", thread_saved_pc(p));
 #endif
 #ifdef CONFIG_DEBUG_STACK_USAGE
         free = stack_not_used(p);
 #endif
-        pr_cont("%5lu %5d %6d 0x%08lx\n", free,
+        printk(KERN_CONT "%5lu %5d %6d 0x%08lx\n", free,
                 task_pid_nr(p), task_pid_nr(p->real_parent),
                 (unsigned long)task_thread_info(p)->flags);
 
@@ -6968,9 +6966,11 @@ void show_state_filter(unsigned long state_filter)
         struct task_struct *g, *p;
 
 #if BITS_PER_LONG == 32
-        pr_info(" task PC stack pid father\n");
+        printk(KERN_INFO
+                " task PC stack pid father\n");
 #else
-        pr_info(" task PC stack pid father\n");
+        printk(KERN_INFO
+                " task PC stack pid father\n");
 #endif
         read_lock(&tasklist_lock);
         do_each_thread(g, p) {
@@ -7828,44 +7828,48 @@ static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level,
         printk(KERN_DEBUG "%*s domain %d: ", level, "", level);
 
         if (!(sd->flags & SD_LOAD_BALANCE)) {
-                pr_cont("does not load-balance\n");
+                printk("does not load-balance\n");
                 if (sd->parent)
-                        pr_err("ERROR: !SD_LOAD_BALANCE domain has parent\n");
+                        printk(KERN_ERR "ERROR: !SD_LOAD_BALANCE domain"
+                                        " has parent");
                 return -1;
         }
 
-        pr_cont("span %s level %s\n", str, sd->name);
+        printk(KERN_CONT "span %s level %s\n", str, sd->name);
 
         if (!cpumask_test_cpu(cpu, sched_domain_span(sd))) {
-                pr_err("ERROR: domain->span does not contain CPU%d\n", cpu);
+                printk(KERN_ERR "ERROR: domain->span does not contain "
+                                "CPU%d\n", cpu);
         }
         if (!cpumask_test_cpu(cpu, sched_group_cpus(group))) {
-                pr_err("ERROR: domain->groups does not contain CPU%d\n", cpu);
+                printk(KERN_ERR "ERROR: domain->groups does not contain"
+                                " CPU%d\n", cpu);
         }
 
         printk(KERN_DEBUG "%*s groups:", level + 1, "");
         do {
                 if (!group) {
-                        pr_cont("\n");
-                        pr_err("ERROR: group is NULL\n");
+                        printk("\n");
+                        printk(KERN_ERR "ERROR: group is NULL\n");
                         break;
                 }
 
                 if (!group->cpu_power) {
-                        pr_cont("\n");
-                        pr_err("ERROR: domain->cpu_power not set\n");
+                        printk(KERN_CONT "\n");
+                        printk(KERN_ERR "ERROR: domain->cpu_power not "
+                                        "set\n");
                         break;
                 }
 
                 if (!cpumask_weight(sched_group_cpus(group))) {
-                        pr_cont("\n");
-                        pr_err("ERROR: empty group\n");
+                        printk(KERN_CONT "\n");
+                        printk(KERN_ERR "ERROR: empty group\n");
                         break;
                 }
 
                 if (cpumask_intersects(groupmask, sched_group_cpus(group))) {
-                        pr_cont("\n");
-                        pr_err("ERROR: repeated CPUs\n");
+                        printk(KERN_CONT "\n");
+                        printk(KERN_ERR "ERROR: repeated CPUs\n");
                         break;
                 }
 
@@ -7873,21 +7877,23 @@ static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level,
 
                 cpulist_scnprintf(str, sizeof(str), sched_group_cpus(group));
 
-                pr_cont(" %s", str);
+                printk(KERN_CONT " %s", str);
                 if (group->cpu_power != SCHED_LOAD_SCALE) {
-                        pr_cont(" (cpu_power = %d)", group->cpu_power);
+                        printk(KERN_CONT " (cpu_power = %d)",
+                                group->cpu_power);
                 }
 
                 group = group->next;
         } while (group != sd->groups);
-        pr_cont("\n");
+        printk(KERN_CONT "\n");
 
         if (!cpumask_equal(sched_domain_span(sd), groupmask))
-                pr_err("ERROR: groups don't span domain->span\n");
+                printk(KERN_ERR "ERROR: groups don't span domain->span\n");
 
         if (sd->parent &&
             !cpumask_subset(groupmask, sched_domain_span(sd->parent)))
-                pr_err("ERROR: parent span is not a superset of domain->span\n");
+                printk(KERN_ERR "ERROR: parent span is not a superset "
+                        "of domain->span\n");
         return 0;
 }
 
@@ -8443,7 +8449,8 @@ static int build_numa_sched_groups(struct s_data *d,
                 sg = kmalloc_node(sizeof(struct sched_group) + cpumask_size(),
                                   GFP_KERNEL, num);
                 if (!sg) {
-                        pr_warning("Can not alloc domain group for node %d\n", num);
+                        printk(KERN_WARNING "Can not alloc domain group for node %d\n",
+                               num);
                         return -ENOMEM;
                 }
                 d->sched_group_nodes[num] = sg;
@@ -8472,8 +8479,8 @@ static int build_numa_sched_groups(struct s_data *d,
                 sg = kmalloc_node(sizeof(struct sched_group) + cpumask_size(),
                                   GFP_KERNEL, num);
                 if (!sg) {
-                        pr_warning("Can not alloc domain group for node %d\n",
-                                   j);
+                        printk(KERN_WARNING
+                               "Can not alloc domain group for node %d\n", j);
                         return -ENOMEM;
                 }
                 sg->cpu_power = 0;
@@ -8701,7 +8708,7 @@ static enum s_alloc __visit_domain_allocation_hell(struct s_data *d,
         d->sched_group_nodes = kcalloc(nr_node_ids,
                                        sizeof(struct sched_group *), GFP_KERNEL);
         if (!d->sched_group_nodes) {
-                pr_warning("Can not alloc sched group node list\n");
+                printk(KERN_WARNING "Can not alloc sched group node list\n");
                 return sa_notcovered;
         }
         sched_group_nodes_bycpu[cpumask_first(cpu_map)] = d->sched_group_nodes;
@@ -8718,7 +8725,7 @@ static enum s_alloc __visit_domain_allocation_hell(struct s_data *d,
                 return sa_send_covered;
         d->rd = alloc_rootdomain();
         if (!d->rd) {
-                pr_warning("Cannot alloc root domain\n");
+                printk(KERN_WARNING "Cannot alloc root domain\n");
                 return sa_tmpmask;
         }
         return sa_rootdomain;
@@ -9700,11 +9707,13 @@ void __might_sleep(char *file, int line, int preempt_offset)
                 return;
         prev_jiffy = jiffies;
 
-        pr_err("BUG: sleeping function called from invalid context at %s:%d\n",
-               file, line);
-        pr_err("in_atomic(): %d, irqs_disabled(): %d, pid: %d, name: %s\n",
-               in_atomic(), irqs_disabled(),
-               current->pid, current->comm);
+        printk(KERN_ERR
+                "BUG: sleeping function called from invalid context at %s:%d\n",
+                        file, line);
+        printk(KERN_ERR
+                "in_atomic(): %d, irqs_disabled(): %d, pid: %d, name: %s\n",
+                        in_atomic(), irqs_disabled(),
+                        current->pid, current->comm);
 
         debug_show_held_locks(current);
         if (irqs_disabled())
diff --git a/kernel/sched_idletask.c b/kernel/sched_idletask.c
index 21b969a28725..5f93b570d383 100644
--- a/kernel/sched_idletask.c
+++ b/kernel/sched_idletask.c
@@ -35,7 +35,7 @@ static void
 dequeue_task_idle(struct rq *rq, struct task_struct *p, int sleep)
 {
         raw_spin_unlock_irq(&rq->lock);
-        pr_err("bad: scheduling from the idle thread!\n");
+        printk(KERN_ERR "bad: scheduling from the idle thread!\n");
         dump_stack();
         raw_spin_lock_irq(&rq->lock);
 }
diff --git a/kernel/time.c b/kernel/time.c
index c6324d96009e..804798005d19 100644
--- a/kernel/time.c
+++ b/kernel/time.c
@@ -136,6 +136,7 @@ static inline void warp_clock(void)
         write_seqlock_irq(&xtime_lock);
         wall_to_monotonic.tv_sec -= sys_tz.tz_minuteswest * 60;
         xtime.tv_sec += sys_tz.tz_minuteswest * 60;
+        update_xtime_cache(0);
         write_sequnlock_irq(&xtime_lock);
         clock_was_set();
 }
diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
index af4135f05825..7faaa32fbf4f 100644
--- a/kernel/time/timekeeping.c
+++ b/kernel/time/timekeeping.c
@@ -165,6 +165,13 @@ struct timespec raw_time;
 /* flag for if timekeeping is suspended */
 int __read_mostly timekeeping_suspended;
 
+static struct timespec xtime_cache __attribute__ ((aligned (16)));
+void update_xtime_cache(u64 nsec)
+{
+        xtime_cache = xtime;
+        timespec_add_ns(&xtime_cache, nsec);
+}
+
 /* must hold xtime_lock */
 void timekeeping_leap_insert(int leapsecond)
 {
@@ -325,6 +332,8 @@ int do_settimeofday(struct timespec *tv)
 
         xtime = *tv;
 
+        update_xtime_cache(0);
+
         timekeeper.ntp_error = 0;
         ntp_clear();
 
@@ -550,6 +559,7 @@ void __init timekeeping_init(void)
         }
         set_normalized_timespec(&wall_to_monotonic,
                                 -boot.tv_sec, -boot.tv_nsec);
+        update_xtime_cache(0);
         total_sleep_time.tv_sec = 0;
         total_sleep_time.tv_nsec = 0;
         write_sequnlock_irqrestore(&xtime_lock, flags);
@@ -583,6 +593,7 @@ static int timekeeping_resume(struct sys_device *dev)
                 wall_to_monotonic = timespec_sub(wall_to_monotonic, ts);
                 total_sleep_time = timespec_add_safe(total_sleep_time, ts);
         }
+        update_xtime_cache(0);
         /* re-base the last cycle value */
         timekeeper.clock->cycle_last = timekeeper.clock->read(timekeeper.clock);
         timekeeper.ntp_error = 0;
@@ -722,6 +733,7 @@ static void timekeeping_adjust(s64 offset)
                                 timekeeper.ntp_error_shift;
 }
 
+
 /**
  * logarithmic_accumulation - shifted accumulation of cycles
  *
@@ -765,6 +777,7 @@ static cycle_t logarithmic_accumulation(cycle_t offset, int shift)
         return offset;
 }
 
+
 /**
  * update_wall_time - Uses the current clocksource to increment the wall time
  *
@@ -774,6 +787,7 @@ void update_wall_time(void)
 {
         struct clocksource *clock;
         cycle_t offset;
+        u64 nsecs;
         int shift = 0, maxshift;
 
         /* Make sure we're fully resumed: */
@@ -839,6 +853,9 @@ void update_wall_time(void)
         timekeeper.ntp_error += timekeeper.xtime_nsec <<
                                 timekeeper.ntp_error_shift;
 
+        nsecs = clocksource_cyc2ns(offset, timekeeper.mult, timekeeper.shift);
+        update_xtime_cache(nsecs);
+
         /* check to see if there is a new clocksource to use */
         update_vsyscall(&xtime, timekeeper.clock, timekeeper.mult);
 }
@@ -875,13 +892,13 @@ void monotonic_to_bootbased(struct timespec *ts)
 
 unsigned long get_seconds(void)
 {
-        return xtime.tv_sec;
+        return xtime_cache.tv_sec;
 }
 EXPORT_SYMBOL(get_seconds);
 
 struct timespec __current_kernel_time(void)
 {
-        return xtime;
+        return xtime_cache;
 }
 
 struct timespec current_kernel_time(void)
@@ -891,7 +908,8 @@ struct timespec current_kernel_time(void)
 
         do {
                 seq = read_seqbegin(&xtime_lock);
-                now = xtime;
+
+                now = xtime_cache;
         } while (read_seqretry(&xtime_lock, seq));
 
         return now;
@@ -905,7 +923,8 @@ struct timespec get_monotonic_coarse(void)
 
         do {
                 seq = read_seqbegin(&xtime_lock);
-                now = xtime;
+
+                now = xtime_cache;
                 mono = wall_to_monotonic;
         } while (read_seqretry(&xtime_lock, seq));
 
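A hedged sketch of the consumer side of the timekeeping hunks: update_xtime_cache() keeps xtime_cache a tick-granular copy of xtime plus the accumulated nanoseconds, and the coarse readers patched above (get_seconds(), __current_kernel_time(), current_kernel_time(), get_monotonic_coarse()) now return that cached value under the seqlock. Only get_seconds() and current_kernel_time() are taken from this diff; the pr_info() usage and function name below are illustrative assumptions.

        #include <linux/kernel.h>
        #include <linux/time.h>

        static void example_stamp(void)
        {
                struct timespec now = current_kernel_time();    /* seqlock-consistent copy of xtime_cache */
                unsigned long secs = get_seconds();             /* cheap: just xtime_cache.tv_sec */

                pr_info("coarse wall time: %ld.%09ld (secs=%lu)\n",
                        now.tv_sec, now.tv_nsec, secs);
        }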
