Diffstat (limited to 'kernel')
-rw-r--r--  kernel/cgroup.c                     |   1
-rw-r--r--  kernel/futex.c                      |  27
-rw-r--r--  kernel/hw_breakpoint.c              |  10
-rw-r--r--  kernel/kfifo.c                      | 107
-rw-r--r--  kernel/kmod.c                       |  12
-rw-r--r--  kernel/kprobes.c                    |   2
-rw-r--r--  kernel/module.c                     |  17
-rw-r--r--  kernel/perf_event.c                 |  14
-rw-r--r--  kernel/sched.c                      |   5
-rw-r--r--  kernel/sched_fair.c                 |   2
-rw-r--r--  kernel/signal.c                     |   3
-rw-r--r--  kernel/smp.c                        |   2
-rw-r--r--  kernel/sysctl_binary.c              |  31
-rw-r--r--  kernel/timer.c                      |   3
-rw-r--r--  kernel/trace/Kconfig                | 112
-rw-r--r--  kernel/trace/ftrace.c               |   6
-rw-r--r--  kernel/trace/ring_buffer.c          |   4
-rw-r--r--  kernel/trace/trace.c                |   2
-rw-r--r--  kernel/trace/trace_events_filter.c  |  29
-rw-r--r--  kernel/trace/trace_export.c         |   7
-rw-r--r--  kernel/trace/trace_kprobe.c         |   7
-rw-r--r--  kernel/trace/trace_ksym.c           | 140
22 files changed, 309 insertions, 234 deletions
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index 0249f4be9b5c..1fbcc748044a 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -2468,7 +2468,6 @@ static struct cgroup_pidlist *cgroup_pidlist_find(struct cgroup *cgrp,
 			/* make sure l doesn't vanish out from under us */
 			down_write(&l->mutex);
 			mutex_unlock(&cgrp->pidlist_mutex);
-			l->use_count++;
 			return l;
 		}
 	}
diff --git a/kernel/futex.c b/kernel/futex.c
index 8e3c3ffe1b9a..d9b3a2228f9d 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -203,8 +203,6 @@ static void drop_futex_key_refs(union futex_key *key)
  * @uaddr: virtual address of the futex
  * @fshared: 0 for a PROCESS_PRIVATE futex, 1 for PROCESS_SHARED
  * @key: address where result is stored.
- * @rw: mapping needs to be read/write (values: VERIFY_READ,
- *      VERIFY_WRITE)
  *
  * Returns a negative error code or 0
  * The key words are stored in *key on success.
@@ -216,7 +214,7 @@ static void drop_futex_key_refs(union futex_key *key)
  * lock_page() might sleep, the caller should not hold a spinlock.
  */
 static int
-get_futex_key(u32 __user *uaddr, int fshared, union futex_key *key, int rw)
+get_futex_key(u32 __user *uaddr, int fshared, union futex_key *key)
 {
 	unsigned long address = (unsigned long)uaddr;
 	struct mm_struct *mm = current->mm;
@@ -239,7 +237,7 @@ get_futex_key(u32 __user *uaddr, int fshared, union futex_key *key, int rw)
 	 * but access_ok() should be faster than find_vma()
 	 */
 	if (!fshared) {
-		if (unlikely(!access_ok(rw, uaddr, sizeof(u32))))
+		if (unlikely(!access_ok(VERIFY_WRITE, uaddr, sizeof(u32))))
 			return -EFAULT;
 		key->private.mm = mm;
 		key->private.address = address;
@@ -248,7 +246,7 @@ get_futex_key(u32 __user *uaddr, int fshared, union futex_key *key, int rw)
 	}
 
 again:
-	err = get_user_pages_fast(address, 1, rw == VERIFY_WRITE, &page);
+	err = get_user_pages_fast(address, 1, 1, &page);
 	if (err < 0)
 		return err;
 
@@ -867,7 +865,7 @@ static int futex_wake(u32 __user *uaddr, int fshared, int nr_wake, u32 bitset)
 	if (!bitset)
 		return -EINVAL;
 
-	ret = get_futex_key(uaddr, fshared, &key, VERIFY_READ);
+	ret = get_futex_key(uaddr, fshared, &key);
 	if (unlikely(ret != 0))
 		goto out;
 
@@ -913,10 +911,10 @@ futex_wake_op(u32 __user *uaddr1, int fshared, u32 __user *uaddr2,
 	int ret, op_ret;
 
 retry:
-	ret = get_futex_key(uaddr1, fshared, &key1, VERIFY_READ);
+	ret = get_futex_key(uaddr1, fshared, &key1);
 	if (unlikely(ret != 0))
 		goto out;
-	ret = get_futex_key(uaddr2, fshared, &key2, VERIFY_WRITE);
+	ret = get_futex_key(uaddr2, fshared, &key2);
 	if (unlikely(ret != 0))
 		goto out_put_key1;
 
@@ -1175,11 +1173,10 @@ retry:
 		pi_state = NULL;
 	}
 
-	ret = get_futex_key(uaddr1, fshared, &key1, VERIFY_READ);
+	ret = get_futex_key(uaddr1, fshared, &key1);
 	if (unlikely(ret != 0))
 		goto out;
-	ret = get_futex_key(uaddr2, fshared, &key2,
-			    requeue_pi ? VERIFY_WRITE : VERIFY_READ);
+	ret = get_futex_key(uaddr2, fshared, &key2);
 	if (unlikely(ret != 0))
 		goto out_put_key1;
 
@@ -1738,7 +1735,7 @@ static int futex_wait_setup(u32 __user *uaddr, u32 val, int fshared,
 	 */
retry:
 	q->key = FUTEX_KEY_INIT;
-	ret = get_futex_key(uaddr, fshared, &q->key, VERIFY_READ);
+	ret = get_futex_key(uaddr, fshared, &q->key);
 	if (unlikely(ret != 0))
 		return ret;
 
@@ -1904,7 +1901,7 @@ static int futex_lock_pi(u32 __user *uaddr, int fshared,
 	q.requeue_pi_key = NULL;
retry:
 	q.key = FUTEX_KEY_INIT;
-	ret = get_futex_key(uaddr, fshared, &q.key, VERIFY_WRITE);
+	ret = get_futex_key(uaddr, fshared, &q.key);
 	if (unlikely(ret != 0))
 		goto out;
 
@@ -2023,7 +2020,7 @@ retry:
 	if ((uval & FUTEX_TID_MASK) != task_pid_vnr(current))
 		return -EPERM;
 
-	ret = get_futex_key(uaddr, fshared, &key, VERIFY_WRITE);
+	ret = get_futex_key(uaddr, fshared, &key);
 	if (unlikely(ret != 0))
 		goto out;
 
@@ -2215,7 +2212,7 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, int fshared,
 	rt_waiter.task = NULL;
 
 	key2 = FUTEX_KEY_INIT;
-	ret = get_futex_key(uaddr2, fshared, &key2, VERIFY_WRITE);
+	ret = get_futex_key(uaddr2, fshared, &key2);
 	if (unlikely(ret != 0))
 		goto out;
 
diff --git a/kernel/hw_breakpoint.c b/kernel/hw_breakpoint.c
index dbcbf6a33a08..50dbd5999588 100644
--- a/kernel/hw_breakpoint.c
+++ b/kernel/hw_breakpoint.c
@@ -40,6 +40,7 @@
 #include <linux/percpu.h>
 #include <linux/sched.h>
 #include <linux/init.h>
+#include <linux/cpu.h>
 #include <linux/smp.h>
 
 #include <linux/hw_breakpoint.h>
@@ -388,7 +389,8 @@ register_wide_hw_breakpoint(struct perf_event_attr *attr,
 	if (!cpu_events)
 		return ERR_PTR(-ENOMEM);
 
-	for_each_possible_cpu(cpu) {
+	get_online_cpus();
+	for_each_online_cpu(cpu) {
 		pevent = per_cpu_ptr(cpu_events, cpu);
 		bp = perf_event_create_kernel_counter(attr, cpu, -1, triggered);
 
@@ -399,18 +401,20 @@ register_wide_hw_breakpoint(struct perf_event_attr *attr,
 			goto fail;
 		}
 	}
+	put_online_cpus();
 
 	return cpu_events;
 
fail:
-	for_each_possible_cpu(cpu) {
+	for_each_online_cpu(cpu) {
 		pevent = per_cpu_ptr(cpu_events, cpu);
 		if (IS_ERR(*pevent))
 			break;
 		unregister_hw_breakpoint(*pevent);
 	}
+	put_online_cpus();
+
 	free_percpu(cpu_events);
-	/* return the error if any */
 	return ERR_PTR(err);
 }
 EXPORT_SYMBOL_GPL(register_wide_hw_breakpoint);
diff --git a/kernel/kfifo.c b/kernel/kfifo.c
index e92d519f93b1..32c5c15d750d 100644
--- a/kernel/kfifo.c
+++ b/kernel/kfifo.c
@@ -28,7 +28,7 @@
 #include <linux/log2.h>
 #include <linux/uaccess.h>
 
-static void _kfifo_init(struct kfifo *fifo, unsigned char *buffer,
+static void _kfifo_init(struct kfifo *fifo, void *buffer,
 		unsigned int size)
 {
 	fifo->buffer = buffer;
@@ -41,10 +41,10 @@ static void _kfifo_init(struct kfifo *fifo, unsigned char *buffer,
  * kfifo_init - initialize a FIFO using a preallocated buffer
  * @fifo: the fifo to assign the buffer
  * @buffer: the preallocated buffer to be used.
- * @size: the size of the internal buffer, this have to be a power of 2.
+ * @size: the size of the internal buffer, this has to be a power of 2.
  *
  */
-void kfifo_init(struct kfifo *fifo, unsigned char *buffer, unsigned int size)
+void kfifo_init(struct kfifo *fifo, void *buffer, unsigned int size)
 {
 	/* size must be a power of 2 */
 	BUG_ON(!is_power_of_2(size));
@@ -159,8 +159,9 @@ static inline void __kfifo_out_data(struct kfifo *fifo,
 		memcpy(to + l, fifo->buffer, len - l);
 }
 
-static inline unsigned int __kfifo_from_user_data(struct kfifo *fifo,
-	const void __user *from, unsigned int len, unsigned int off)
+static inline int __kfifo_from_user_data(struct kfifo *fifo,
+	const void __user *from, unsigned int len, unsigned int off,
+	unsigned *lenout)
 {
 	unsigned int l;
 	int ret;
@@ -177,16 +178,20 @@ static inline unsigned int __kfifo_from_user_data(struct kfifo *fifo,
 	/* first put the data starting from fifo->in to buffer end */
 	l = min(len, fifo->size - off);
 	ret = copy_from_user(fifo->buffer + off, from, l);
-
-	if (unlikely(ret))
-		return ret + len - l;
+	if (unlikely(ret)) {
+		*lenout = ret;
+		return -EFAULT;
+	}
+	*lenout = l;
 
 	/* then put the rest (if any) at the beginning of the buffer */
-	return copy_from_user(fifo->buffer, from + l, len - l);
+	ret = copy_from_user(fifo->buffer, from + l, len - l);
+	*lenout += ret ? ret : len - l;
+	return ret ? -EFAULT : 0;
 }
 
-static inline unsigned int __kfifo_to_user_data(struct kfifo *fifo,
-	void __user *to, unsigned int len, unsigned int off)
+static inline int __kfifo_to_user_data(struct kfifo *fifo,
+	void __user *to, unsigned int len, unsigned int off, unsigned *lenout)
 {
 	unsigned int l;
 	int ret;
@@ -203,12 +208,21 @@ static inline unsigned int __kfifo_to_user_data(struct kfifo *fifo,
 	/* first get the data from fifo->out until the end of the buffer */
 	l = min(len, fifo->size - off);
 	ret = copy_to_user(to, fifo->buffer + off, l);
-
-	if (unlikely(ret))
-		return ret + len - l;
+	*lenout = l;
+	if (unlikely(ret)) {
+		*lenout -= ret;
+		return -EFAULT;
+	}
 
 	/* then get the rest (if any) from the beginning of the buffer */
-	return copy_to_user(to + l, fifo->buffer, len - l);
+	len -= l;
+	ret = copy_to_user(to + l, fifo->buffer, len);
+	if (unlikely(ret)) {
+		*lenout += len - ret;
+		return -EFAULT;
+	}
+	*lenout += len;
+	return 0;
 }
 
 unsigned int __kfifo_in_n(struct kfifo *fifo,
@@ -235,7 +249,7 @@ EXPORT_SYMBOL(__kfifo_in_n);
  * Note that with only one concurrent reader and one concurrent
  * writer, you don't need extra locking to use these functions.
  */
-unsigned int kfifo_in(struct kfifo *fifo, const unsigned char *from,
+unsigned int kfifo_in(struct kfifo *fifo, const void *from,
 		unsigned int len)
 {
 	len = min(kfifo_avail(fifo), len);
@@ -277,7 +291,7 @@ EXPORT_SYMBOL(__kfifo_out_n);
  * Note that with only one concurrent reader and one concurrent
  * writer, you don't need extra locking to use these functions.
  */
-unsigned int kfifo_out(struct kfifo *fifo, unsigned char *to, unsigned int len)
+unsigned int kfifo_out(struct kfifo *fifo, void *to, unsigned int len)
 {
 	len = min(kfifo_len(fifo), len);
 
@@ -288,6 +302,27 @@ unsigned int kfifo_out(struct kfifo *fifo, void *to, unsigned int len)
 }
 EXPORT_SYMBOL(kfifo_out);
 
+/**
+ * kfifo_out_peek - copy some data from the FIFO, but do not remove it
+ * @fifo: the fifo to be used.
+ * @to: where the data must be copied.
+ * @len: the size of the destination buffer.
+ * @offset: offset into the fifo
+ *
+ * This function copies at most @len bytes at @offset from the FIFO
+ * into the @to buffer and returns the number of copied bytes.
+ * The data is not removed from the FIFO.
+ */
+unsigned int kfifo_out_peek(struct kfifo *fifo, void *to, unsigned int len,
+			    unsigned offset)
+{
+	len = min(kfifo_len(fifo), len + offset);
+
+	__kfifo_out_data(fifo, to, len, offset);
+	return len;
+}
+EXPORT_SYMBOL(kfifo_out_peek);
+
 unsigned int __kfifo_out_generic(struct kfifo *fifo,
 	void *to, unsigned int len, unsigned int recsize,
 	unsigned int *total)
@@ -299,10 +334,13 @@ EXPORT_SYMBOL(__kfifo_out_generic);
 unsigned int __kfifo_from_user_n(struct kfifo *fifo,
 	const void __user *from, unsigned int len, unsigned int recsize)
 {
+	unsigned total;
+
 	if (kfifo_avail(fifo) < len + recsize)
 		return len + 1;
 
-	return __kfifo_from_user_data(fifo, from, len, recsize);
+	__kfifo_from_user_data(fifo, from, len, recsize, &total);
+	return total;
 }
 EXPORT_SYMBOL(__kfifo_from_user_n);
 
@@ -313,18 +351,21 @@ EXPORT_SYMBOL(__kfifo_from_user_n);
  * @len: the length of the data to be added.
  *
  * This function copies at most @len bytes from the @from into the
- * FIFO depending and returns the number of copied bytes.
+ * FIFO depending and returns -EFAULT/0.
  *
  * Note that with only one concurrent reader and one concurrent
 * writer, you don't need extra locking to use these functions.
  */
-unsigned int kfifo_from_user(struct kfifo *fifo,
-	const void __user *from, unsigned int len)
+int kfifo_from_user(struct kfifo *fifo,
+	const void __user *from, unsigned int len, unsigned *total)
 {
+	int ret;
 	len = min(kfifo_avail(fifo), len);
-	len -= __kfifo_from_user_data(fifo, from, len, 0);
+	ret = __kfifo_from_user_data(fifo, from, len, 0, total);
+	if (ret)
+		return ret;
 	__kfifo_add_in(fifo, len);
-	return len;
+	return 0;
 }
 EXPORT_SYMBOL(kfifo_from_user);
 
@@ -339,17 +380,17 @@ unsigned int __kfifo_to_user_n(struct kfifo *fifo,
 	void __user *to, unsigned int len, unsigned int reclen,
 	unsigned int recsize)
 {
-	unsigned int ret;
+	unsigned int ret, total;
 
 	if (kfifo_len(fifo) < reclen + recsize)
 		return len;
 
-	ret = __kfifo_to_user_data(fifo, to, reclen, recsize);
+	ret = __kfifo_to_user_data(fifo, to, reclen, recsize, &total);
 
 	if (likely(ret == 0))
 		__kfifo_add_out(fifo, reclen + recsize);
 
-	return ret;
+	return total;
 }
 EXPORT_SYMBOL(__kfifo_to_user_n);
 
@@ -358,20 +399,22 @@ EXPORT_SYMBOL(__kfifo_to_user_n);
  * @fifo: the fifo to be used.
  * @to: where the data must be copied.
  * @len: the size of the destination buffer.
+ * @lenout: pointer to output variable with copied data
  *
  * This function copies at most @len bytes from the FIFO into the
- * @to buffer and returns the number of copied bytes.
+ * @to buffer and 0 or -EFAULT.
  *
  * Note that with only one concurrent reader and one concurrent
 * writer, you don't need extra locking to use these functions.
  */
-unsigned int kfifo_to_user(struct kfifo *fifo,
-	void __user *to, unsigned int len)
+int kfifo_to_user(struct kfifo *fifo,
+	void __user *to, unsigned int len, unsigned *lenout)
 {
+	int ret;
 	len = min(kfifo_len(fifo), len);
-	len -= __kfifo_to_user_data(fifo, to, len, 0);
-	__kfifo_add_out(fifo, len);
-	return len;
+	ret = __kfifo_to_user_data(fifo, to, len, 0, lenout);
+	__kfifo_add_out(fifo, *lenout);
+	return ret;
 }
 EXPORT_SYMBOL(kfifo_to_user);
 
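Worth noting for callers of the hunks above: kfifo_from_user() and kfifo_to_user() no longer return a byte count; they now return 0 or -EFAULT and report the bytes that actually moved through a new output parameter. A minimal userspace sketch of that convention follows (sim_copy() and its fault injection are invented stand-ins for copy_from_user(), purely for illustration):

#include <stdio.h>
#include <string.h>

/* Invented stand-in for copy_from_user(): copies n bytes but "faults"
 * after fault_after bytes, returning the number NOT copied, exactly
 * like the kernel primitive. fault_after < 0 means no fault. */
static int fault_after = -1;

static unsigned sim_copy(void *dst, const void *src, unsigned n)
{
	if (fault_after >= 0 && (unsigned)fault_after < n) {
		memcpy(dst, src, (unsigned)fault_after);
		return n - (unsigned)fault_after;	/* bytes left uncopied */
	}
	memcpy(dst, src, n);
	return 0;
}

/* Models the new convention: return 0 or -EFAULT (-1 here) and report
 * the bytes that actually landed via *lenout, instead of the old
 * "return the shortfall" style that callers kept misusing. */
static int sim_from_user(char *fifo, const char *from, unsigned len,
			 unsigned *lenout)
{
	unsigned ret = sim_copy(fifo, from, len);

	*lenout = len - ret;
	return ret ? -1 : 0;
}

int main(void)
{
	char fifo[16];
	unsigned copied;

	fault_after = 5;
	if (sim_from_user(fifo, "hello world", 11, &copied))
		printf("fault: only %u of 11 bytes copied\n", copied);
	return 0;
}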
diff --git a/kernel/kmod.c b/kernel/kmod.c
index 25b103190364..bf0e231d9702 100644
--- a/kernel/kmod.c
+++ b/kernel/kmod.c
@@ -520,13 +520,15 @@ int call_usermodehelper_pipe(char *path, char **argv, char **envp,
 		return -ENOMEM;
 
 	ret = call_usermodehelper_stdinpipe(sub_info, filp);
-	if (ret < 0)
-		goto out;
+	if (ret < 0) {
+		call_usermodehelper_freeinfo(sub_info);
+		return ret;
+	}
 
-	return call_usermodehelper_exec(sub_info, UMH_WAIT_EXEC);
+	ret = call_usermodehelper_exec(sub_info, UMH_WAIT_EXEC);
+	if (ret < 0)	/* Failed to execute helper, close pipe */
+		filp_close(*filp, NULL);
 
-out:
-	call_usermodehelper_freeinfo(sub_info);
 	return ret;
 }
 EXPORT_SYMBOL(call_usermodehelper_pipe);
diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index e5342a344c43..b7df302a0204 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -1035,7 +1035,7 @@ int __kprobes register_kretprobe(struct kretprobe *rp)
 	/* Pre-allocate memory for max kretprobe instances */
 	if (rp->maxactive <= 0) {
 #ifdef CONFIG_PREEMPT
-		rp->maxactive = max(10, 2 * num_possible_cpus());
+		rp->maxactive = max_t(unsigned int, 10, 2*num_possible_cpus());
 #else
 		rp->maxactive = num_possible_cpus();
 #endif
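The max() to max_t() switch above is a type issue: the kernel's max() macro rejects mixed argument types at compile time, and here the signed constant 10 meets the unsigned result of num_possible_cpus(). max_t() casts both sides to one explicit type first. A small userspace rendition of the two macros, assuming GCC-style typeof() and a stubbed num_possible_cpus():

#include <stdio.h>

/* Kernel-style max(): the dummy pointer comparison makes the compiler
 * warn (or error under -Werror) when the two arguments have different
 * types, e.g. int vs unsigned int. */
#define max(x, y) ({				\
	typeof(x) _max1 = (x);			\
	typeof(y) _max2 = (y);			\
	(void) (&_max1 == &_max2);		\
	_max1 > _max2 ? _max1 : _max2; })

/* max_t(): cast both arguments to an explicit type first, sidestepping
 * the type check. This is what the patch uses. */
#define max_t(type, x, y) ({			\
	type _max1 = (type)(x);			\
	type _max2 = (type)(y);			\
	_max1 > _max2 ? _max1 : _max2; })

static unsigned int num_possible_cpus(void) { return 4; }	/* stub */

int main(void)
{
	/* max(10, 2 * num_possible_cpus()) would mix int with unsigned
	 * int and trip the pointer-comparison warning above. */
	unsigned int maxactive = max_t(unsigned int, 10,
				       2 * num_possible_cpus());
	printf("maxactive = %u\n", maxactive);
	return 0;
}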
diff --git a/kernel/module.c b/kernel/module.c
index e96b8ed1cb6a..f82386bd9ee9 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -1010,6 +1010,12 @@ static const struct kernel_symbol *resolve_symbol(Elf_Shdr *sechdrs,
  * J. Corbet <corbet@lwn.net>
  */
 #if defined(CONFIG_KALLSYMS) && defined(CONFIG_SYSFS)
+
+static inline bool sect_empty(const Elf_Shdr *sect)
+{
+	return !(sect->sh_flags & SHF_ALLOC) || sect->sh_size == 0;
+}
+
 struct module_sect_attr
 {
 	struct module_attribute mattr;
@@ -1051,8 +1057,7 @@ static void add_sect_attrs(struct module *mod, unsigned int nsect,
 
 	/* Count loaded sections and allocate structures */
 	for (i = 0; i < nsect; i++)
-		if (sechdrs[i].sh_flags & SHF_ALLOC
-		    && sechdrs[i].sh_size)
+		if (!sect_empty(&sechdrs[i]))
 			nloaded++;
 	size[0] = ALIGN(sizeof(*sect_attrs)
 			+ nloaded * sizeof(sect_attrs->attrs[0]),
@@ -1070,9 +1075,7 @@ static void add_sect_attrs(struct module *mod, unsigned int nsect,
 	sattr = &sect_attrs->attrs[0];
 	gattr = &sect_attrs->grp.attrs[0];
 	for (i = 0; i < nsect; i++) {
-		if (! (sechdrs[i].sh_flags & SHF_ALLOC))
-			continue;
-		if (!sechdrs[i].sh_size)
+		if (sect_empty(&sechdrs[i]))
 			continue;
 		sattr->address = sechdrs[i].sh_addr;
 		sattr->name = kstrdup(secstrings + sechdrs[i].sh_name,
@@ -1156,7 +1159,7 @@ static void add_notes_attrs(struct module *mod, unsigned int nsect,
 	/* Count notes sections and allocate structures.  */
 	notes = 0;
 	for (i = 0; i < nsect; i++)
-		if ((sechdrs[i].sh_flags & SHF_ALLOC) &&
+		if (!sect_empty(&sechdrs[i]) &&
 		    (sechdrs[i].sh_type == SHT_NOTE))
 			++notes;
 
@@ -1172,7 +1175,7 @@ static void add_notes_attrs(struct module *mod, unsigned int nsect,
 	notes_attrs->notes = notes;
 	nattr = &notes_attrs->attrs[0];
 	for (loaded = i = 0; i < nsect; ++i) {
-		if (!(sechdrs[i].sh_flags & SHF_ALLOC))
+		if (sect_empty(&sechdrs[i]))
 			continue;
 		if (sechdrs[i].sh_type == SHT_NOTE) {
 			nattr->attr.name = mod->sect_attrs->attrs[loaded].name;
diff --git a/kernel/perf_event.c b/kernel/perf_event.c
index 1f38270f08c7..d27746bd3a06 100644
--- a/kernel/perf_event.c
+++ b/kernel/perf_event.c
@@ -3268,6 +3268,9 @@ static void perf_event_task_output(struct perf_event *event,
 
 static int perf_event_task_match(struct perf_event *event)
 {
+	if (event->state != PERF_EVENT_STATE_ACTIVE)
+		return 0;
+
 	if (event->cpu != -1 && event->cpu != smp_processor_id())
 		return 0;
 
@@ -3377,6 +3380,9 @@ static void perf_event_comm_output(struct perf_event *event,
 
 static int perf_event_comm_match(struct perf_event *event)
 {
+	if (event->state != PERF_EVENT_STATE_ACTIVE)
+		return 0;
+
 	if (event->cpu != -1 && event->cpu != smp_processor_id())
 		return 0;
 
@@ -3494,6 +3500,9 @@ static void perf_event_mmap_output(struct perf_event *event,
 static int perf_event_mmap_match(struct perf_event *event,
 				 struct perf_mmap_event *mmap_event)
 {
+	if (event->state != PERF_EVENT_STATE_ACTIVE)
+		return 0;
+
 	if (event->cpu != -1 && event->cpu != smp_processor_id())
 		return 0;
 
@@ -5148,7 +5157,7 @@ int perf_event_init_task(struct task_struct *child)
 					  GFP_KERNEL);
 		if (!child_ctx) {
 			ret = -ENOMEM;
-			goto exit;
+			break;
 		}
 
 		__perf_event_init_context(child_ctx, child);
@@ -5164,7 +5173,7 @@ int perf_event_init_task(struct task_struct *child)
 		}
 	}
 
-	if (inherited_all) {
+	if (child_ctx && inherited_all) {
 		/*
 		 * Mark the child context as a clone of the parent
 		 * context, or of whatever the parent is a clone of.
@@ -5184,7 +5193,6 @@ int perf_event_init_task(struct task_struct *child)
 		get_ctx(child_ctx->parent_ctx);
 	}
 
-exit:
 	mutex_unlock(&parent_ctx->mutex);
 
 	perf_unpin_context(parent_ctx);
diff --git a/kernel/sched.c b/kernel/sched.c
index c535cc4f6428..4508fe7048be 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -5530,8 +5530,11 @@ need_resched_nonpreemptible:
 
 	post_schedule(rq);
 
-	if (unlikely(reacquire_kernel_lock(current) < 0))
+	if (unlikely(reacquire_kernel_lock(current) < 0)) {
+		prev = rq->curr;
+		switch_count = &prev->nivcsw;
 		goto need_resched_nonpreemptible;
+	}
 
 	preempt_enable_no_resched();
 	if (need_resched())
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 42ac3c9f66f6..8fe7ee81c552 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -1508,7 +1508,7 @@ static int select_task_rq_fair(struct task_struct *p, int sd_flag, int wake_flag
 		 * If there's an idle sibling in this domain, make that
 		 * the wake_affine target instead of the current cpu.
 		 */
-		if (tmp->flags & SD_PREFER_SIBLING)
+		if (tmp->flags & SD_SHARE_PKG_RESOURCES)
 			target = select_idle_sibling(p, tmp, target);
 
 		if (target >= 0) {
diff --git a/kernel/signal.c b/kernel/signal.c
index d09692b40376..934ae5e687b9 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -979,7 +979,8 @@ static void print_fatal_signal(struct pt_regs *regs, int signr)
 		for (i = 0; i < 16; i++) {
 			unsigned char insn;
 
-			__get_user(insn, (unsigned char *)(regs->ip + i));
+			if (get_user(insn, (unsigned char *)(regs->ip + i)))
+				break;
 			printk("%02x ", insn);
 		}
 	}
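The signal.c hunk fixes an ignored return value: __get_user() performs no access check, so the old loop printed whatever stale value was left in insn when the read of the faulting instruction pointer failed. get_user() returns nonzero on failure, and the dump now stops at the first unreadable byte. A userspace analogue of the corrected loop (read_byte() is a made-up stand-in for get_user()):

#include <stdio.h>

/* Made-up stand-in for get_user(): returns 0 on success, nonzero on
 * "fault"; *out is only valid on success. */
static int read_byte(const unsigned char *addr, const unsigned char *end,
		     unsigned char *out)
{
	if (addr >= end)
		return -1;	/* would have faulted */
	*out = *addr;
	return 0;
}

int main(void)
{
	unsigned char code[] = { 0x55, 0x48, 0x89, 0xe5 };
	unsigned char insn;
	int i;

	for (i = 0; i < 16; i++) {
		/* Stop at the first failed read instead of printing
		 * whatever happened to be in 'insn' (the old bug). */
		if (read_byte(code + i, code + sizeof(code), &insn))
			break;
		printf("%02x ", insn);
	}
	printf("\n");
	return 0;
}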
diff --git a/kernel/smp.c b/kernel/smp.c
index de735a6637d0..f10408422444 100644
--- a/kernel/smp.c
+++ b/kernel/smp.c
@@ -347,7 +347,7 @@ int smp_call_function_any(const struct cpumask *mask,
 		goto call;
 
 	/* Try for same node. */
-	nodemask = cpumask_of_node(cpu);
+	nodemask = cpumask_of_node(cpu_to_node(cpu));
 	for (cpu = cpumask_first_and(nodemask, mask); cpu < nr_cpu_ids;
 	     cpu = cpumask_next_and(cpu, nodemask, mask)) {
 		if (cpu_online(cpu))
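The smp.c change fixes a unit confusion: cpumask_of_node() expects a NUMA node id, but the code passed a CPU number, which compiles silently since both are plain ints. A toy model of why the cpu_to_node() translation matters (the topology here is invented):

#include <stdio.h>

#define NR_NODES 2

/* Invented topology: cpus 0-3 live on node 0, cpus 4-7 on node 1. */
static int cpu_to_node(int cpu)
{
	return cpu / 4;
}

/* One cpumask per NODE; indexing this array with a CPU number (the
 * old bug) reads out of bounds or picks the wrong node's mask. */
static const unsigned node_masks[NR_NODES] = { 0x0f, 0xf0 };

int main(void)
{
	int cpu = 5;

	/* Correct: translate the cpu to its node first. */
	printf("cpus sharing cpu %d's node: 0x%02x\n",
	       cpu, node_masks[cpu_to_node(cpu)]);
	return 0;
}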
diff --git a/kernel/sysctl_binary.c b/kernel/sysctl_binary.c
index 112533d5fc08..8f5d16e0707a 100644
--- a/kernel/sysctl_binary.c
+++ b/kernel/sysctl_binary.c
@@ -1417,6 +1417,35 @@ static void deprecated_sysctl_warning(const int *name, int nlen)
 	return;
 }
 
+#define WARN_ONCE_HASH_BITS 8
+#define WARN_ONCE_HASH_SIZE (1<<WARN_ONCE_HASH_BITS)
+
+static DECLARE_BITMAP(warn_once_bitmap, WARN_ONCE_HASH_SIZE);
+
+#define FNV32_OFFSET 2166136261U
+#define FNV32_PRIME 0x01000193
+
+/*
+ * Print each legacy sysctl (approximately) only once.
+ * To avoid making the tables non-const use a external
+ * hash-table instead.
+ * Worst case hash collision: 6, but very rarely.
+ * NOTE! We don't use the SMP-safe bit tests. We simply
+ * don't care enough.
+ */
+static void warn_on_bintable(const int *name, int nlen)
+{
+	int i;
+	u32 hash = FNV32_OFFSET;
+
+	for (i = 0; i < nlen; i++)
+		hash = (hash ^ name[i]) * FNV32_PRIME;
+	hash %= WARN_ONCE_HASH_SIZE;
+	if (__test_and_set_bit(hash, warn_once_bitmap))
+		return;
+	deprecated_sysctl_warning(name, nlen);
+}
+
 static ssize_t do_sysctl(int __user *args_name, int nlen,
 	void __user *oldval, size_t oldlen, void __user *newval, size_t newlen)
 {
@@ -1431,7 +1460,7 @@ static ssize_t do_sysctl(int __user *args_name, int nlen,
 		if (get_user(name[i], args_name + i))
 			return -EFAULT;
 
-	deprecated_sysctl_warning(name, nlen);
+	warn_on_bintable(name, nlen);
 
 	return binary_sysctl(name, nlen, oldval, oldlen, newval, newlen);
 }
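The new warn_on_bintable() is a classic "warn once per key" pattern: hash the sysctl name vector with 32-bit FNV-1a, fold it into a small bitmap, and only warn the first time a bucket is hit. A standalone rendition of the same arithmetic (the sysctl name vector in main() is made up):

#include <stdio.h>
#include <stdint.h>

#define HASH_BITS 8
#define HASH_SIZE (1u << HASH_BITS)

#define FNV32_OFFSET 2166136261U
#define FNV32_PRIME  0x01000193

static unsigned char warned[HASH_SIZE / 8];

/* FNV-1a over the integer name vector, folded to HASH_BITS bits,
 * exactly as the patch does; a collision just means a warning is
 * occasionally swallowed, which is acceptable here. */
static int warn_once(const int *name, int nlen)
{
	uint32_t hash = FNV32_OFFSET;
	int i;

	for (i = 0; i < nlen; i++)
		hash = (hash ^ (uint32_t)name[i]) * FNV32_PRIME;
	hash %= HASH_SIZE;

	if (warned[hash / 8] & (1u << (hash % 8)))
		return 0;	/* this bucket already warned */
	warned[hash / 8] |= 1u << (hash % 8);
	return 1;
}

int main(void)
{
	int name[] = { 1, 1 };	/* hypothetical binary sysctl name */
	int i;

	for (i = 0; i < 3; i++)
		if (warn_once(name, 2))
			printf("sysctl: deprecated binary interface used\n");
	return 0;	/* the warning prints exactly once */
}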
diff --git a/kernel/timer.c b/kernel/timer.c
index 15533b792397..c61a7949387f 100644
--- a/kernel/timer.c
+++ b/kernel/timer.c
@@ -1198,6 +1198,7 @@ void update_process_times(int user_tick)
 	run_local_timers();
 	rcu_check_callbacks(cpu, user_tick);
 	printk_tick();
+	perf_event_do_pending();
 	scheduler_tick();
 	run_posix_cpu_timers(p);
 }
@@ -1209,8 +1210,6 @@ static void run_timer_softirq(struct softirq_action *h)
 {
 	struct tvec_base *base = __get_cpu_var(tvec_bases);
 
-	perf_event_do_pending();
-
 	hrtimer_run_pending();
 
 	if (time_after_eq(jiffies, base->timer_jiffies))
diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig
index d006554888dc..6c22d8a2f289 100644
--- a/kernel/trace/Kconfig
+++ b/kernel/trace/Kconfig
@@ -12,17 +12,17 @@ config NOP_TRACER
 config HAVE_FTRACE_NMI_ENTER
 	bool
 	help
-	  See Documentation/trace/ftrace-implementation.txt
+	  See Documentation/trace/ftrace-design.txt
 
 config HAVE_FUNCTION_TRACER
 	bool
 	help
-	  See Documentation/trace/ftrace-implementation.txt
+	  See Documentation/trace/ftrace-design.txt
 
 config HAVE_FUNCTION_GRAPH_TRACER
 	bool
 	help
-	  See Documentation/trace/ftrace-implementation.txt
+	  See Documentation/trace/ftrace-design.txt
 
 config HAVE_FUNCTION_GRAPH_FP_TEST
 	bool
@@ -34,17 +34,17 @@ config HAVE_FUNCTION_GRAPH_FP_TEST
 config HAVE_FUNCTION_TRACE_MCOUNT_TEST
 	bool
 	help
-	  See Documentation/trace/ftrace-implementation.txt
+	  See Documentation/trace/ftrace-design.txt
 
 config HAVE_DYNAMIC_FTRACE
 	bool
 	help
-	  See Documentation/trace/ftrace-implementation.txt
+	  See Documentation/trace/ftrace-design.txt
 
 config HAVE_FTRACE_MCOUNT_RECORD
 	bool
 	help
-	  See Documentation/trace/ftrace-implementation.txt
+	  See Documentation/trace/ftrace-design.txt
 
 config HAVE_HW_BRANCH_TRACER
 	bool
@@ -52,7 +52,7 @@ config HAVE_HW_BRANCH_TRACER
 config HAVE_SYSCALL_TRACEPOINTS
 	bool
 	help
-	  See Documentation/trace/ftrace-implementation.txt
+	  See Documentation/trace/ftrace-design.txt
 
 config TRACER_MAX_TRACE
 	bool
@@ -83,7 +83,7 @@ config RING_BUFFER_ALLOW_SWAP
 # This allows those options to appear when no other tracer is selected. But the
 # options do not appear when something else selects it. We need the two options
 # GENERIC_TRACER and TRACING to avoid circular dependencies to accomplish the
-# hidding of the automatic options.
+# hiding of the automatic options.
 
 config TRACING
 	bool
@@ -119,7 +119,7 @@ menuconfig FTRACE
 	bool "Tracers"
 	default y if DEBUG_KERNEL
 	help
-	 Enable the kernel tracing infrastructure.
+	  Enable the kernel tracing infrastructure.
 
 if FTRACE
 
@@ -133,7 +133,7 @@ config FUNCTION_TRACER
 	help
 	  Enable the kernel to trace every kernel function. This is done
 	  by using a compiler feature to insert a small, 5-byte No-Operation
-	  instruction to the beginning of every kernel function, which NOP
+	  instruction at the beginning of every kernel function, which NOP
 	  sequence is then dynamically patched into a tracer call when
 	  tracing is enabled by the administrator. If it's runtime disabled
 	  (the bootup default), then the overhead of the instructions is very
@@ -150,7 +150,7 @@ config FUNCTION_GRAPH_TRACER
 	  and its entry.
 	  Its first purpose is to trace the duration of functions and
 	  draw a call graph for each thread with some information like
-	  the return value. This is done by setting the current return 
+	  the return value. This is done by setting the current return
 	  address on the current task structure into a stack of calls.
 
 
@@ -173,7 +173,7 @@ config IRQSOFF_TRACER
 
 	      echo 0 > /sys/kernel/debug/tracing/tracing_max_latency
 
-	  (Note that kernel size and overhead increases with this option
+	  (Note that kernel size and overhead increase with this option
 	  enabled. This option and the preempt-off timing option can be
 	  used together or separately.)
 
@@ -186,7 +186,7 @@ config PREEMPT_TRACER
 	select TRACER_MAX_TRACE
 	select RING_BUFFER_ALLOW_SWAP
 	help
-	  This option measures the time spent in preemption off critical
+	  This option measures the time spent in preemption-off critical
 	  sections, with microsecond accuracy.
 
 	  The default measurement method is a maximum search, which is
@@ -195,7 +195,7 @@ config PREEMPT_TRACER
 
 	      echo 0 > /sys/kernel/debug/tracing/tracing_max_latency
 
-	  (Note that kernel size and overhead increases with this option
+	  (Note that kernel size and overhead increase with this option
 	  enabled. This option and the irqs-off timing option can be
 	  used together or separately.)
 
@@ -222,7 +222,7 @@ config ENABLE_DEFAULT_TRACERS
 	depends on !GENERIC_TRACER
 	select TRACING
 	help
-	  This tracer hooks to various trace points in the kernel
+	  This tracer hooks to various trace points in the kernel,
 	  allowing the user to pick and choose which trace point they
 	  want to trace. It also includes the sched_switch tracer plugin.
 
@@ -265,19 +265,19 @@ choice
 	  The likely/unlikely profiler only looks at the conditions that
 	  are annotated with a likely or unlikely macro.
 
-	  The "all branch" profiler will profile every if statement in the
+	  The "all branch" profiler will profile every if-statement in the
 	  kernel. This profiler will also enable the likely/unlikely
-	  profiler as well.
+	  profiler.
 
-	  Either of the above profilers add a bit of overhead to the system.
-	  If unsure choose "No branch profiling".
+	  Either of the above profilers adds a bit of overhead to the system.
+	  If unsure, choose "No branch profiling".
 
 config BRANCH_PROFILE_NONE
 	bool "No branch profiling"
 	help
 	  No branch profiling. Branch profiling adds a bit of overhead.
 	  Only enable it if you want to analyse the branching behavior.
 	  Otherwise keep it disabled.
 
 config PROFILE_ANNOTATED_BRANCHES
 	bool "Trace likely/unlikely profiler"
@@ -288,7 +288,7 @@ config PROFILE_ANNOTATED_BRANCHES
 
 	  /sys/kernel/debug/tracing/profile_annotated_branch
 
-	  Note: this will add a significant overhead, only turn this
+	  Note: this will add a significant overhead; only turn this
 	  on if you need to profile the system's use of these macros.
 
 config PROFILE_ALL_BRANCHES
@@ -305,7 +305,7 @@ config PROFILE_ALL_BRANCHES
 
 	  This configuration, when enabled, will impose a great overhead
 	  on the system. This should only be enabled when the system
-	  is to be analyzed
+	  is to be analyzed in much detail.
 endchoice
 
 config TRACING_BRANCHES
@@ -335,7 +335,7 @@ config POWER_TRACER
 	depends on X86
 	select GENERIC_TRACER
 	help
-	  This tracer helps developers to analyze and optimize the kernels
+	  This tracer helps developers to analyze and optimize the kernel's
 	  power management decisions, specifically the C-state and P-state
 	  behavior.
 
@@ -391,14 +391,14 @@ config HW_BRANCH_TRACER
 	select GENERIC_TRACER
 	help
 	  This tracer records all branches on the system in a circular
-	  buffer giving access to the last N branches for each cpu.
+	  buffer, giving access to the last N branches for each cpu.
 
 config KMEMTRACE
 	bool "Trace SLAB allocations"
 	select GENERIC_TRACER
 	help
 	  kmemtrace provides tracing for slab allocator functions, such as
-	  kmalloc, kfree, kmem_cache_alloc, kmem_cache_free etc.. Collected
+	  kmalloc, kfree, kmem_cache_alloc, kmem_cache_free, etc. Collected
 	  data is then fed to the userspace application in order to analyse
 	  allocation hotspots, internal fragmentation and so on, making it
 	  possible to see how well an allocator performs, as well as debug
@@ -417,15 +417,15 @@ config WORKQUEUE_TRACER
 	bool "Trace workqueues"
 	select GENERIC_TRACER
 	help
-	  The workqueue tracer provides some statistical informations
+	  The workqueue tracer provides some statistical information
 	  about each cpu workqueue thread such as the number of the
 	  works inserted and executed since their creation. It can help
-	  to evaluate the amount of work each of them have to perform.
+	  to evaluate the amount of work each of them has to perform.
 	  For example it can help a developer to decide whether he should
-	  choose a per cpu workqueue instead of a singlethreaded one.
+	  choose a per-cpu workqueue instead of a singlethreaded one.
 
 config BLK_DEV_IO_TRACE
-	bool "Support for tracing block io actions"
+	bool "Support for tracing block IO actions"
 	depends on SYSFS
 	depends on BLOCK
 	select RELAY
@@ -456,15 +456,15 @@ config KPROBE_EVENT
 	select TRACING
 	default y
 	help
-	  This allows the user to add tracing events (similar to tracepoints) on the fly
-	  via the ftrace interface. See Documentation/trace/kprobetrace.txt
-	  for more details.
+	  This allows the user to add tracing events (similar to tracepoints)
+	  on the fly via the ftrace interface. See
+	  Documentation/trace/kprobetrace.txt for more details.
 
 	  Those events can be inserted wherever kprobes can probe, and record
 	  various register and memory values.
 
-	  This option is also required by perf-probe subcommand of perf tools. If
-	  you want to use perf tools, this option is strongly recommended.
+	  This option is also required by perf-probe subcommand of perf tools.
+	  If you want to use perf tools, this option is strongly recommended.
 
 config DYNAMIC_FTRACE
 	bool "enable/disable ftrace tracepoints dynamically"
@@ -472,32 +472,32 @@ config DYNAMIC_FTRACE
 	depends on HAVE_DYNAMIC_FTRACE
 	default y
 	help
 	  This option will modify all the calls to ftrace dynamically
-	  (will patch them out of the binary image and replaces them
+	  (will patch them out of the binary image and replace them
 	  with a No-Op instruction) as they are called. A table is
 	  created to dynamically enable them again.
 
-	  This way a CONFIG_FUNCTION_TRACER kernel is slightly larger, but otherwise
-	  has native performance as long as no tracing is active.
+	  This way a CONFIG_FUNCTION_TRACER kernel is slightly larger, but
+	  otherwise has native performance as long as no tracing is active.
 
 	  The changes to the code are done by a kernel thread that
 	  wakes up once a second and checks to see if any ftrace calls
 	  were made. If so, it runs stop_machine (stops all CPUS)
 	  and modifies the code to jump over the call to ftrace.
 
 config FUNCTION_PROFILER
 	bool "Kernel function profiler"
 	depends on FUNCTION_TRACER
 	default n
 	help
 	  This option enables the kernel function profiler. A file is created
 	  in debugfs called function_profile_enabled which defaults to zero.
 	  When a 1 is echoed into this file profiling begins, and when a
-	  zero is entered, profiling stops. A file in the trace_stats
-	  directory called functions, that show the list of functions that
+	  zero is entered, profiling stops. A "functions" file is created in
+	  the trace_stats directory; this file shows the list of functions that
 	  have been hit and their counters.
 
-	  If in doubt, say N
+	  If in doubt, say N.
 
 config FTRACE_MCOUNT_RECORD
 	def_bool y
@@ -556,8 +556,8 @@ config RING_BUFFER_BENCHMARK
 	tristate "Ring buffer benchmark stress tester"
 	depends on RING_BUFFER
 	help
-	  This option creates a test to stress the ring buffer and bench mark it.
-	  It creates its own ring buffer such that it will not interfer with
+	  This option creates a test to stress the ring buffer and benchmark it.
+	  It creates its own ring buffer such that it will not interfere with
 	  any other users of the ring buffer (such as ftrace). It then creates
 	  a producer and consumer that will run for 10 seconds and sleep for
 	  10 seconds. Each interval it will print out the number of events
@@ -566,7 +566,7 @@ config RING_BUFFER_BENCHMARK
 	  It does not disable interrupts or raise its priority, so it may be
 	  affected by processes that are running.
 
-	  If unsure, say N
+	  If unsure, say N.
 
 endif # FTRACE
 
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c index 7968762c8167..1e6640f80454 100644 --- a/kernel/trace/ftrace.c +++ b/kernel/trace/ftrace.c | |||
@@ -1690,7 +1690,7 @@ ftrace_regex_lseek(struct file *file, loff_t offset, int origin) | |||
1690 | static int ftrace_match(char *str, char *regex, int len, int type) | 1690 | static int ftrace_match(char *str, char *regex, int len, int type) |
1691 | { | 1691 | { |
1692 | int matched = 0; | 1692 | int matched = 0; |
1693 | char *ptr; | 1693 | int slen; |
1694 | 1694 | ||
1695 | switch (type) { | 1695 | switch (type) { |
1696 | case MATCH_FULL: | 1696 | case MATCH_FULL: |
@@ -1706,8 +1706,8 @@ static int ftrace_match(char *str, char *regex, int len, int type) | |||
1706 | matched = 1; | 1706 | matched = 1; |
1707 | break; | 1707 | break; |
1708 | case MATCH_END_ONLY: | 1708 | case MATCH_END_ONLY: |
1709 | ptr = strstr(str, regex); | 1709 | slen = strlen(str); |
1710 | if (ptr && (ptr[len] == 0)) | 1710 | if (slen >= len && memcmp(str + slen - len, regex, len) == 0) |
1711 | matched = 1; | 1711 | matched = 1; |
1712 | break; | 1712 | break; |
1713 | } | 1713 | } |
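The old MATCH_END_ONLY logic trusted the first strstr() hit: for the pattern "foo" and the string "foobar_foo", strstr() stops at offset 0, finds 'b' after the pattern instead of '\0', and wrongly reports no match even though the string does end in "foo". The replacement compares the tail of the string directly. A standalone sketch of the fixed check:

	#include <string.h>

	/* Return 1 if str ends with the first len bytes of regex,
	 * mirroring the fixed MATCH_END_ONLY case above. */
	static int match_end_only(const char *str, const char *regex, int len)
	{
		int slen = strlen(str);

		return slen >= len && memcmp(str + slen - len, regex, len) == 0;
	}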
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c index 2326b04c95c4..edefe3b2801b 100644 --- a/kernel/trace/ring_buffer.c +++ b/kernel/trace/ring_buffer.c | |||
@@ -2869,7 +2869,7 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer) | |||
2869 | * Splice the empty reader page into the list around the head. | 2869 | * Splice the empty reader page into the list around the head. |
2870 | */ | 2870 | */ |
2871 | reader = rb_set_head_page(cpu_buffer); | 2871 | reader = rb_set_head_page(cpu_buffer); |
2872 | cpu_buffer->reader_page->list.next = reader->list.next; | 2872 | cpu_buffer->reader_page->list.next = rb_list_head(reader->list.next); |
2873 | cpu_buffer->reader_page->list.prev = reader->list.prev; | 2873 | cpu_buffer->reader_page->list.prev = reader->list.prev; |
2874 | 2874 | ||
2875 | /* | 2875 | /* |
@@ -2906,7 +2906,7 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer) | |||
2906 | * | 2906 | * |
2907 | * Now make the new head point back to the reader page. | 2907 | * Now make the new head point back to the reader page. |
2908 | */ | 2908 | */ |
2909 | reader->list.next->prev = &cpu_buffer->reader_page->list; | 2909 | rb_list_head(reader->list.next)->prev = &cpu_buffer->reader_page->list; |
2910 | rb_inc_page(cpu_buffer, &cpu_buffer->head_page); | 2910 | rb_inc_page(cpu_buffer, &cpu_buffer->head_page); |
2911 | 2911 | ||
2912 | /* Finally update the reader page to the new head */ | 2912 | /* Finally update the reader page to the new head */ |
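Both hunks exist because the ring buffer stores state flags in the low bits of the ->next pointers of its page list, so a raw ->next is not necessarily a dereferenceable pointer; rb_list_head() masks the flag bits off first. A close sketch of the helper as it appears in ring_buffer.c (the two-bit mask carries the HEAD/UPDATE flags):

	#define RB_FLAG_MASK	3UL

	/* Strip the flag bits so the result can safely be dereferenced. */
	static struct list_head *rb_list_head(struct list_head *list)
	{
		unsigned long val = (unsigned long)list;

		return (struct list_head *)(val & ~RB_FLAG_MASK);
	}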
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c index 8b9f20ab8eed..0df1b0f2cb9e 100644 --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c | |||
@@ -3949,7 +3949,7 @@ trace_options_write(struct file *filp, const char __user *ubuf, size_t cnt, | |||
3949 | if (!!(topt->flags->val & topt->opt->bit) != val) { | 3949 | if (!!(topt->flags->val & topt->opt->bit) != val) { |
3950 | mutex_lock(&trace_types_lock); | 3950 | mutex_lock(&trace_types_lock); |
3951 | ret = __set_tracer_option(current_trace, topt->flags, | 3951 | ret = __set_tracer_option(current_trace, topt->flags, |
3952 | topt->opt, val); | 3952 | topt->opt, !val); |
3953 | mutex_unlock(&trace_types_lock); | 3953 | mutex_unlock(&trace_types_lock); |
3954 | if (ret) | 3954 | if (ret) |
3955 | return ret; | 3955 | return ret; |
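The !val is not a typo: the last parameter of __set_tracer_option() is a "neg" flag, where non-zero means "clear this option", so a user writing '0' must be passed neg == 1. A sketch of the flag update this implies (illustrative helper only; the real function also invokes the tracer's set_flag() callback):

	/* Apply a "neg" (negate/clear) request to a flag word. */
	static void apply_option(unsigned int *flags, unsigned int bit, int neg)
	{
		if (neg)
			*flags &= ~bit;
		else
			*flags |= bit;
	}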
diff --git a/kernel/trace/trace_events_filter.c b/kernel/trace/trace_events_filter.c index 50504cb228de..e42af9aad69f 100644 --- a/kernel/trace/trace_events_filter.c +++ b/kernel/trace/trace_events_filter.c | |||
@@ -211,8 +211,9 @@ static int filter_pred_pchar(struct filter_pred *pred, void *event, | |||
211 | { | 211 | { |
212 | char **addr = (char **)(event + pred->offset); | 212 | char **addr = (char **)(event + pred->offset); |
213 | int cmp, match; | 213 | int cmp, match; |
214 | int len = strlen(*addr) + 1; /* including trailing '\0' */ | ||
214 | 215 | ||
215 | cmp = pred->regex.match(*addr, &pred->regex, pred->regex.field_len); | 216 | cmp = pred->regex.match(*addr, &pred->regex, len); |
216 | 217 | ||
217 | match = cmp ^ pred->not; | 218 | match = cmp ^ pred->not; |
218 | 219 | ||
@@ -251,7 +252,18 @@ static int filter_pred_none(struct filter_pred *pred, void *event, | |||
251 | return 0; | 252 | return 0; |
252 | } | 253 | } |
253 | 254 | ||
254 | /* Basic regex callbacks */ | 255 | /* |
256 | * regex_match_foo - Basic regex callbacks | ||
257 | * | ||
258 | * @str: the string to be searched | ||
259 | * @r: the regex structure containing the pattern string | ||
260 | * @len: the length of the string to be searched (including '\0') | ||
261 | * | ||
262 | * Note: | ||
263 | * - @str might not be NULL-terminated if it's of type DYN_STRING | ||
264 | * or STATIC_STRING | ||
265 | */ | ||
266 | |||
255 | static int regex_match_full(char *str, struct regex *r, int len) | 267 | static int regex_match_full(char *str, struct regex *r, int len) |
256 | { | 268 | { |
257 | if (strncmp(str, r->pattern, len) == 0) | 269 | if (strncmp(str, r->pattern, len) == 0) |
@@ -261,23 +273,24 @@ static int regex_match_full(char *str, struct regex *r, int len) | |||
261 | 273 | ||
262 | static int regex_match_front(char *str, struct regex *r, int len) | 274 | static int regex_match_front(char *str, struct regex *r, int len) |
263 | { | 275 | { |
264 | if (strncmp(str, r->pattern, len) == 0) | 276 | if (strncmp(str, r->pattern, r->len) == 0) |
265 | return 1; | 277 | return 1; |
266 | return 0; | 278 | return 0; |
267 | } | 279 | } |
268 | 280 | ||
269 | static int regex_match_middle(char *str, struct regex *r, int len) | 281 | static int regex_match_middle(char *str, struct regex *r, int len) |
270 | { | 282 | { |
271 | if (strstr(str, r->pattern)) | 283 | if (strnstr(str, r->pattern, len)) |
272 | return 1; | 284 | return 1; |
273 | return 0; | 285 | return 0; |
274 | } | 286 | } |
275 | 287 | ||
276 | static int regex_match_end(char *str, struct regex *r, int len) | 288 | static int regex_match_end(char *str, struct regex *r, int len) |
277 | { | 289 | { |
278 | char *ptr = strstr(str, r->pattern); | 290 | int strlen = len - 1; |
279 | 291 | ||
280 | if (ptr && (ptr[r->len] == 0)) | 292 | if (strlen >= r->len && |
293 | memcmp(str + strlen - r->len, r->pattern, r->len) == 0) | ||
281 | return 1; | 294 | return 1; |
282 | return 0; | 295 | return 0; |
283 | } | 296 | } |
@@ -781,10 +794,8 @@ static int filter_add_pred(struct filter_parse_state *ps, | |||
781 | pred->regex.field_len = field->size; | 794 | pred->regex.field_len = field->size; |
782 | } else if (field->filter_type == FILTER_DYN_STRING) | 795 | } else if (field->filter_type == FILTER_DYN_STRING) |
783 | fn = filter_pred_strloc; | 796 | fn = filter_pred_strloc; |
784 | else { | 797 | else |
785 | fn = filter_pred_pchar; | 798 | fn = filter_pred_pchar; |
786 | pred->regex.field_len = strlen(pred->regex.pattern); | ||
787 | } | ||
788 | } else { | 799 | } else { |
789 | if (field->is_signed) | 800 | if (field->is_signed) |
790 | ret = strict_strtoll(pred->regex.pattern, 0, &val); | 801 | ret = strict_strtoll(pred->regex.pattern, 0, &val); |
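The rewritten callbacks all respect the contract spelled out in the added comment block: len counts the searchable bytes (including the '\0'), and the string may not be NUL-terminated, so unbounded strstr()/strlen() calls are out. strnstr() is a kernel string helper with no glibc equivalent; a portable sketch of the bounded search it performs:

	#include <string.h>

	/* Bounded substring search: never look past len bytes of a
	 * possibly unterminated buffer. */
	static const char *bounded_strstr(const char *s, const char *pat, size_t len)
	{
		size_t plen = strlen(pat);
		size_t i;

		if (plen == 0)
			return s;
		for (i = 0; i + plen <= len; i++) {
			if (s[i] == '\0')
				break;	/* stop at an early terminator */
			if (memcmp(s + i, pat, plen) == 0)
				return s + i;
		}
		return NULL;
	}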
diff --git a/kernel/trace/trace_export.c b/kernel/trace/trace_export.c index 458e5bfe26d0..d4fa5dc1ee4e 100644 --- a/kernel/trace/trace_export.c +++ b/kernel/trace/trace_export.c | |||
@@ -158,7 +158,8 @@ ftrace_format_##name(struct ftrace_event_call *unused, \ | |||
158 | BUILD_BUG_ON(len > MAX_FILTER_STR_VAL); \ | 158 | BUILD_BUG_ON(len > MAX_FILTER_STR_VAL); \ |
159 | ret = trace_define_field(event_call, #type "[" #len "]", #item, \ | 159 | ret = trace_define_field(event_call, #type "[" #len "]", #item, \ |
160 | offsetof(typeof(field), item), \ | 160 | offsetof(typeof(field), item), \ |
161 | sizeof(field.item), 0, FILTER_OTHER); \ | 161 | sizeof(field.item), \ |
162 | is_signed_type(type), FILTER_OTHER); \ | ||
162 | if (ret) \ | 163 | if (ret) \ |
163 | return ret; | 164 | return ret; |
164 | 165 | ||
@@ -168,8 +169,8 @@ ftrace_format_##name(struct ftrace_event_call *unused, \ | |||
168 | ret = trace_define_field(event_call, #type "[" #len "]", #item, \ | 169 | ret = trace_define_field(event_call, #type "[" #len "]", #item, \ |
169 | offsetof(typeof(field), \ | 170 | offsetof(typeof(field), \ |
170 | container.item), \ | 171 | container.item), \ |
171 | sizeof(field.container.item), 0, \ | 172 | sizeof(field.container.item), \ |
172 | FILTER_OTHER); \ | 173 | is_signed_type(type), FILTER_OTHER); \ |
173 | if (ret) \ | 174 | if (ret) \ |
174 | return ret; | 175 | return ret; |
175 | 176 | ||
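Both hunks replace a hard-coded 0 with is_signed_type(type). The macro relies on a C idiom: casting -1 to a signed type yields a negative value, while casting it to an unsigned type wraps to the type's maximum. Up to minor variations across kernel versions, the definition is the following; comparing against (type)1 rather than a literal 0 sidesteps "comparison is always false" warnings for unsigned types:

	#define is_signed_type(type)	(((type)(-1)) < (type)1)

	/* Compile-time spot checks (negative array size on failure): */
	typedef char check_signed[is_signed_type(signed char) ? 1 : -1];
	typedef char check_unsigned[is_signed_type(unsigned int) ? -1 : 1];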
diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c index 375f81a568dc..6ea90c0e2c96 100644 --- a/kernel/trace/trace_kprobe.c +++ b/kernel/trace/trace_kprobe.c | |||
@@ -1201,10 +1201,11 @@ static int __probe_event_show_format(struct trace_seq *s, | |||
1201 | #undef SHOW_FIELD | 1201 | #undef SHOW_FIELD |
1202 | #define SHOW_FIELD(type, item, name) \ | 1202 | #define SHOW_FIELD(type, item, name) \ |
1203 | do { \ | 1203 | do { \ |
1204 | ret = trace_seq_printf(s, "\tfield: " #type " %s;\t" \ | 1204 | ret = trace_seq_printf(s, "\tfield:" #type " %s;\t" \ |
1205 | "offset:%u;\tsize:%u;\n", name, \ | 1205 | "offset:%u;\tsize:%u;\tsigned:%d;\n", name,\ |
1206 | (unsigned int)offsetof(typeof(field), item),\ | 1206 | (unsigned int)offsetof(typeof(field), item),\ |
1207 | (unsigned int)sizeof(type)); \ | 1207 | (unsigned int)sizeof(type), \ |
1208 | is_signed_type(type)); \ | ||
1208 | if (!ret) \ | 1209 | if (!ret) \ |
1209 | return 0; \ | 1210 | return 0; \ |
1210 | } while (0) | 1211 | } while (0) |
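With the signed:%d addition, every field line that a kprobe event emits into its debugfs format file now carries a signedness attribute that user-space parsers can consume. An illustrative line of the resulting output (the field name, offset, and size are made up):

	field:unsigned long __probe_ip;	offset:12;	size:4;	signed:0;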
diff --git a/kernel/trace/trace_ksym.c b/kernel/trace/trace_ksym.c index faf37fa4408c..94103cdcf9d8 100644 --- a/kernel/trace/trace_ksym.c +++ b/kernel/trace/trace_ksym.c | |||
@@ -26,12 +26,13 @@ | |||
26 | #include <linux/fs.h> | 26 | #include <linux/fs.h> |
27 | 27 | ||
28 | #include "trace_output.h" | 28 | #include "trace_output.h" |
29 | #include "trace_stat.h" | ||
30 | #include "trace.h" | 29 | #include "trace.h" |
31 | 30 | ||
32 | #include <linux/hw_breakpoint.h> | 31 | #include <linux/hw_breakpoint.h> |
33 | #include <asm/hw_breakpoint.h> | 32 | #include <asm/hw_breakpoint.h> |
34 | 33 | ||
34 | #include <asm/atomic.h> | ||
35 | |||
35 | /* | 36 | /* |
36 | * For now, let us restrict the no. of symbols traced simultaneously to number | 37 | * For now, let us restrict the no. of symbols traced simultaneously to number |
37 | * of available hardware breakpoint registers. | 38 | * of available hardware breakpoint registers. |
@@ -44,7 +45,7 @@ struct trace_ksym { | |||
44 | struct perf_event **ksym_hbp; | 45 | struct perf_event **ksym_hbp; |
45 | struct perf_event_attr attr; | 46 | struct perf_event_attr attr; |
46 | #ifdef CONFIG_PROFILE_KSYM_TRACER | 47 | #ifdef CONFIG_PROFILE_KSYM_TRACER |
47 | unsigned long counter; | 48 | atomic64_t counter; |
48 | #endif | 49 | #endif |
49 | struct hlist_node ksym_hlist; | 50 | struct hlist_node ksym_hlist; |
50 | }; | 51 | }; |
@@ -69,9 +70,8 @@ void ksym_collect_stats(unsigned long hbp_hit_addr) | |||
69 | 70 | ||
70 | rcu_read_lock(); | 71 | rcu_read_lock(); |
71 | hlist_for_each_entry_rcu(entry, node, &ksym_filter_head, ksym_hlist) { | 72 | hlist_for_each_entry_rcu(entry, node, &ksym_filter_head, ksym_hlist) { |
72 | if ((entry->attr.bp_addr == hbp_hit_addr) && | 73 | if (entry->attr.bp_addr == hbp_hit_addr) { |
73 | (entry->counter <= MAX_UL_INT)) { | 74 | atomic64_inc(&entry->counter); |
74 | entry->counter++; | ||
75 | break; | 75 | break; |
76 | } | 76 | } |
77 | } | 77 | } |
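The old code bumped a plain unsigned long under rcu_read_lock(), guarded by a saturation check against MAX_UL_INT; the RCU lock protects the list walk, not the increment, so concurrent breakpoint hits on other CPUs could still lose updates. An atomic64_t makes the increment race-free and 64 bits wide even on 32-bit hardware, which is why both the check and the ++ disappear. The pattern in isolation (kernel-style sketch):

	#include <linux/types.h>
	#include <asm/atomic.h>

	static atomic64_t hits = ATOMIC64_INIT(0);

	static void on_breakpoint_hit(void)
	{
		atomic64_inc(&hits);	/* safe from any context, no lock needed */
	}

	static u64 read_hits(void)
	{
		return (u64)atomic64_read(&hits);
	}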
@@ -197,7 +197,6 @@ int process_new_ksym_entry(char *ksymname, int op, unsigned long addr) | |||
197 | entry->attr.bp_addr = addr; | 197 | entry->attr.bp_addr = addr; |
198 | entry->attr.bp_len = HW_BREAKPOINT_LEN_4; | 198 | entry->attr.bp_len = HW_BREAKPOINT_LEN_4; |
199 | 199 | ||
200 | ret = -EAGAIN; | ||
201 | entry->ksym_hbp = register_wide_hw_breakpoint(&entry->attr, | 200 | entry->ksym_hbp = register_wide_hw_breakpoint(&entry->attr, |
202 | ksym_hbp_handler); | 201 | ksym_hbp_handler); |
203 | 202 | ||
@@ -300,8 +299,8 @@ static ssize_t ksym_trace_filter_write(struct file *file, | |||
300 | * 2: echo 0 > ksym_trace_filter | 299 | * 2: echo 0 > ksym_trace_filter |
301 | * 3: echo "*:---" > ksym_trace_filter | 300 | * 3: echo "*:---" > ksym_trace_filter |
302 | */ | 301 | */ |
303 | if (!buf[0] || !strcmp(buf, "0") || | 302 | if (!input_string[0] || !strcmp(input_string, "0") || |
304 | !strcmp(buf, "*:---")) { | 303 | !strcmp(input_string, "*:---")) { |
305 | __ksym_trace_reset(); | 304 | __ksym_trace_reset(); |
306 | ret = 0; | 305 | ret = 0; |
307 | goto out; | 306 | goto out; |
@@ -444,102 +443,77 @@ struct tracer ksym_tracer __read_mostly = | |||
444 | .print_line = ksym_trace_output | 443 | .print_line = ksym_trace_output |
445 | }; | 444 | }; |
446 | 445 | ||
447 | __init static int init_ksym_trace(void) | ||
448 | { | ||
449 | struct dentry *d_tracer; | ||
450 | struct dentry *entry; | ||
451 | |||
452 | d_tracer = tracing_init_dentry(); | ||
453 | ksym_filter_entry_count = 0; | ||
454 | |||
455 | entry = debugfs_create_file("ksym_trace_filter", 0644, d_tracer, | ||
456 | NULL, &ksym_tracing_fops); | ||
457 | if (!entry) | ||
458 | pr_warning("Could not create debugfs " | ||
459 | "'ksym_trace_filter' file\n"); | ||
460 | |||
461 | return register_tracer(&ksym_tracer); | ||
462 | } | ||
463 | device_initcall(init_ksym_trace); | ||
464 | |||
465 | |||
466 | #ifdef CONFIG_PROFILE_KSYM_TRACER | 446 | #ifdef CONFIG_PROFILE_KSYM_TRACER |
467 | static int ksym_tracer_stat_headers(struct seq_file *m) | 447 | static int ksym_profile_show(struct seq_file *m, void *v) |
468 | { | 448 | { |
449 | struct hlist_node *node; | ||
450 | struct trace_ksym *entry; | ||
451 | int access_type = 0; | ||
452 | char fn_name[KSYM_NAME_LEN]; | ||
453 | |||
469 | seq_puts(m, " Access Type "); | 454 | seq_puts(m, " Access Type "); |
470 | seq_puts(m, " Symbol Counter\n"); | 455 | seq_puts(m, " Symbol Counter\n"); |
471 | seq_puts(m, " ----------- "); | 456 | seq_puts(m, " ----------- "); |
472 | seq_puts(m, " ------ -------\n"); | 457 | seq_puts(m, " ------ -------\n"); |
473 | return 0; | ||
474 | } | ||
475 | 458 | ||
476 | static int ksym_tracer_stat_show(struct seq_file *m, void *v) | 459 | rcu_read_lock(); |
477 | { | 460 | hlist_for_each_entry_rcu(entry, node, &ksym_filter_head, ksym_hlist) { |
478 | struct hlist_node *stat = v; | ||
479 | struct trace_ksym *entry; | ||
480 | int access_type = 0; | ||
481 | char fn_name[KSYM_NAME_LEN]; | ||
482 | 461 | ||
483 | entry = hlist_entry(stat, struct trace_ksym, ksym_hlist); | 462 | access_type = entry->attr.bp_type; |
484 | 463 | ||
485 | access_type = entry->attr.bp_type; | 464 | switch (access_type) { |
465 | case HW_BREAKPOINT_R: | ||
466 | seq_puts(m, " R "); | ||
467 | break; | ||
468 | case HW_BREAKPOINT_W: | ||
469 | seq_puts(m, " W "); | ||
470 | break; | ||
471 | case HW_BREAKPOINT_R | HW_BREAKPOINT_W: | ||
472 | seq_puts(m, " RW "); | ||
473 | break; | ||
474 | default: | ||
475 | seq_puts(m, " NA "); | ||
476 | } | ||
486 | 477 | ||
487 | switch (access_type) { | 478 | if (lookup_symbol_name(entry->attr.bp_addr, fn_name) >= 0) |
488 | case HW_BREAKPOINT_R: | 479 | seq_printf(m, " %-36s", fn_name); |
489 | seq_puts(m, " R "); | 480 | else |
490 | break; | 481 | seq_printf(m, " %-36s", "<NA>"); |
491 | case HW_BREAKPOINT_W: | 482 | seq_printf(m, " %15llu\n", |
492 | seq_puts(m, " W "); | 483 | (unsigned long long)atomic64_read(&entry->counter)); |
493 | break; | ||
494 | case HW_BREAKPOINT_R | HW_BREAKPOINT_W: | ||
495 | seq_puts(m, " RW "); | ||
496 | break; | ||
497 | default: | ||
498 | seq_puts(m, " NA "); | ||
499 | } | 484 | } |
500 | 485 | rcu_read_unlock(); | |
501 | if (lookup_symbol_name(entry->attr.bp_addr, fn_name) >= 0) | ||
502 | seq_printf(m, " %-36s", fn_name); | ||
503 | else | ||
504 | seq_printf(m, " %-36s", "<NA>"); | ||
505 | seq_printf(m, " %15lu\n", entry->counter); | ||
506 | 486 | ||
507 | return 0; | 487 | return 0; |
508 | } | 488 | } |
509 | 489 | ||
510 | static void *ksym_tracer_stat_start(struct tracer_stat *trace) | 490 | static int ksym_profile_open(struct inode *node, struct file *file) |
511 | { | 491 | { |
512 | return ksym_filter_head.first; | 492 | return single_open(file, ksym_profile_show, NULL); |
513 | } | ||
514 | |||
515 | static void * | ||
516 | ksym_tracer_stat_next(void *v, int idx) | ||
517 | { | ||
518 | struct hlist_node *stat = v; | ||
519 | |||
520 | return stat->next; | ||
521 | } | 493 | } |
522 | 494 | ||
523 | static struct tracer_stat ksym_tracer_stats = { | 495 | static const struct file_operations ksym_profile_fops = { |
524 | .name = "ksym_tracer", | 496 | .open = ksym_profile_open, |
525 | .stat_start = ksym_tracer_stat_start, | 497 | .read = seq_read, |
526 | .stat_next = ksym_tracer_stat_next, | 498 | .llseek = seq_lseek, |
527 | .stat_headers = ksym_tracer_stat_headers, | 499 | .release = single_release, |
528 | .stat_show = ksym_tracer_stat_show | ||
529 | }; | 500 | }; |
501 | #endif /* CONFIG_PROFILE_KSYM_TRACER */ | ||
530 | 502 | ||
531 | __init static int ksym_tracer_stat_init(void) | 503 | __init static int init_ksym_trace(void) |
532 | { | 504 | { |
533 | int ret; | 505 | struct dentry *d_tracer; |
534 | 506 | ||
535 | ret = register_stat_tracer(&ksym_tracer_stats); | 507 | d_tracer = tracing_init_dentry(); |
536 | if (ret) { | ||
537 | printk(KERN_WARNING "Warning: could not register " | ||
538 | "ksym tracer stats\n"); | ||
539 | return 1; | ||
540 | } | ||
541 | 508 | ||
542 | return 0; | 509 | trace_create_file("ksym_trace_filter", 0644, d_tracer, |
510 | NULL, &ksym_tracing_fops); | ||
511 | |||
512 | #ifdef CONFIG_PROFILE_KSYM_TRACER | ||
513 | trace_create_file("ksym_profile", 0444, d_tracer, | ||
514 | NULL, &ksym_profile_fops); | ||
515 | #endif | ||
516 | |||
517 | return register_tracer(&ksym_tracer); | ||
543 | } | 518 | } |
544 | fs_initcall(ksym_tracer_stat_init); | 519 | device_initcall(init_ksym_trace); |
545 | #endif /* CONFIG_PROFILE_KSYM_TRACER */ | ||
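The conversion above replaces the bespoke stat-tracer plumbing (stat_start/stat_next/stat_headers/stat_show) with the stock seq_file single_open() pattern: one show callback renders the whole table, and seq_read/seq_lseek/single_release supply the rest of the file semantics. A skeleton of that pattern, with placeholder "demo" names:

	#include <linux/debugfs.h>
	#include <linux/seq_file.h>

	/* One callback prints the entire file contents. */
	static int demo_show(struct seq_file *m, void *v)
	{
		seq_puts(m, "hello from a single_open file\n");
		return 0;
	}

	static int demo_open(struct inode *inode, struct file *file)
	{
		return single_open(file, demo_show, NULL);
	}

	static const struct file_operations demo_fops = {
		.open		= demo_open,
		.read		= seq_read,
		.llseek		= seq_lseek,
		.release	= single_release,
	};

	/* In an init path:
	 *	debugfs_create_file("demo", 0444, NULL, NULL, &demo_fops);
	 */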