| author | Dmitry Torokhov <dmitry.torokhov@gmail.com> | 2010-03-01 02:55:20 -0500 |
|---|---|---|
| committer | Dmitry Torokhov <dmitry.torokhov@gmail.com> | 2010-03-01 02:55:20 -0500 |
| commit | 35858adbfca13678af99fb31618ef4428d6dedb0 (patch) | |
| tree | 3336feaa61324486945816cb52c347733e7c0821 /kernel/trace | |
| parent | 197d4db752e67160d79fed09968c2140376a80a3 (diff) | |
| parent | 4b70858ba8d4537daf782defebe5f2ff80ccef2b (diff) | |
Merge branch 'next' into for-linus
Diffstat (limited to 'kernel/trace')
| -rw-r--r-- | kernel/trace/Kconfig | 112 |
| -rw-r--r-- | kernel/trace/ftrace.c | 36 |
| -rw-r--r-- | kernel/trace/power-traces.c | 2 |
| -rw-r--r-- | kernel/trace/ring_buffer.c | 49 |
| -rw-r--r-- | kernel/trace/trace.c | 238 |
| -rw-r--r-- | kernel/trace/trace.h | 23 |
| -rw-r--r-- | kernel/trace/trace_clock.c | 8 |
| -rw-r--r-- | kernel/trace/trace_event_profile.c | 6 |
| -rw-r--r-- | kernel/trace/trace_events.c | 41 |
| -rw-r--r-- | kernel/trace/trace_events_filter.c | 29 |
| -rw-r--r-- | kernel/trace/trace_export.c | 11 |
| -rw-r--r-- | kernel/trace/trace_irqsoff.c | 2 |
| -rw-r--r-- | kernel/trace/trace_kprobe.c | 47 |
| -rw-r--r-- | kernel/trace/trace_ksym.c | 188 |
| -rw-r--r-- | kernel/trace/trace_sched_wakeup.c | 16 |
| -rw-r--r-- | kernel/trace/trace_selftest.c | 4 |
| -rw-r--r-- | kernel/trace/trace_stack.c | 16 |
| -rw-r--r-- | kernel/trace/trace_syscalls.c | 18 |
| -rw-r--r-- | kernel/trace/trace_sysprof.c | 1 |
19 files changed, 409 insertions, 438 deletions
diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig index d006554888dc..6c22d8a2f289 100644 --- a/kernel/trace/Kconfig +++ b/kernel/trace/Kconfig
| @@ -12,17 +12,17 @@ config NOP_TRACER | |||
| 12 | config HAVE_FTRACE_NMI_ENTER | 12 | config HAVE_FTRACE_NMI_ENTER |
| 13 | bool | 13 | bool |
| 14 | help | 14 | help |
| 15 | See Documentation/trace/ftrace-implementation.txt | 15 | See Documentation/trace/ftrace-design.txt |
| 16 | 16 | ||
| 17 | config HAVE_FUNCTION_TRACER | 17 | config HAVE_FUNCTION_TRACER |
| 18 | bool | 18 | bool |
| 19 | help | 19 | help |
| 20 | See Documentation/trace/ftrace-implementation.txt | 20 | See Documentation/trace/ftrace-design.txt |
| 21 | 21 | ||
| 22 | config HAVE_FUNCTION_GRAPH_TRACER | 22 | config HAVE_FUNCTION_GRAPH_TRACER |
| 23 | bool | 23 | bool |
| 24 | help | 24 | help |
| 25 | See Documentation/trace/ftrace-implementation.txt | 25 | See Documentation/trace/ftrace-design.txt |
| 26 | 26 | ||
| 27 | config HAVE_FUNCTION_GRAPH_FP_TEST | 27 | config HAVE_FUNCTION_GRAPH_FP_TEST |
| 28 | bool | 28 | bool |
| @@ -34,17 +34,17 @@ config HAVE_FUNCTION_GRAPH_FP_TEST | |||
| 34 | config HAVE_FUNCTION_TRACE_MCOUNT_TEST | 34 | config HAVE_FUNCTION_TRACE_MCOUNT_TEST |
| 35 | bool | 35 | bool |
| 36 | help | 36 | help |
| 37 | See Documentation/trace/ftrace-implementation.txt | 37 | See Documentation/trace/ftrace-design.txt |
| 38 | 38 | ||
| 39 | config HAVE_DYNAMIC_FTRACE | 39 | config HAVE_DYNAMIC_FTRACE |
| 40 | bool | 40 | bool |
| 41 | help | 41 | help |
| 42 | See Documentation/trace/ftrace-implementation.txt | 42 | See Documentation/trace/ftrace-design.txt |
| 43 | 43 | ||
| 44 | config HAVE_FTRACE_MCOUNT_RECORD | 44 | config HAVE_FTRACE_MCOUNT_RECORD |
| 45 | bool | 45 | bool |
| 46 | help | 46 | help |
| 47 | See Documentation/trace/ftrace-implementation.txt | 47 | See Documentation/trace/ftrace-design.txt |
| 48 | 48 | ||
| 49 | config HAVE_HW_BRANCH_TRACER | 49 | config HAVE_HW_BRANCH_TRACER |
| 50 | bool | 50 | bool |
| @@ -52,7 +52,7 @@ config HAVE_HW_BRANCH_TRACER | |||
| 52 | config HAVE_SYSCALL_TRACEPOINTS | 52 | config HAVE_SYSCALL_TRACEPOINTS |
| 53 | bool | 53 | bool |
| 54 | help | 54 | help |
| 55 | See Documentation/trace/ftrace-implementation.txt | 55 | See Documentation/trace/ftrace-design.txt |
| 56 | 56 | ||
| 57 | config TRACER_MAX_TRACE | 57 | config TRACER_MAX_TRACE |
| 58 | bool | 58 | bool |
| @@ -83,7 +83,7 @@ config RING_BUFFER_ALLOW_SWAP | |||
| 83 | # This allows those options to appear when no other tracer is selected. But the | 83 | # This allows those options to appear when no other tracer is selected. But the |
| 84 | # options do not appear when something else selects it. We need the two options | 84 | # options do not appear when something else selects it. We need the two options |
| 85 | # GENERIC_TRACER and TRACING to avoid circular dependencies to accomplish the | 85 | # GENERIC_TRACER and TRACING to avoid circular dependencies to accomplish the |
| 86 | # hidding of the automatic options. | 86 | # hiding of the automatic options. |
| 87 | 87 | ||
| 88 | config TRACING | 88 | config TRACING |
| 89 | bool | 89 | bool |
| @@ -119,7 +119,7 @@ menuconfig FTRACE | |||
| 119 | bool "Tracers" | 119 | bool "Tracers" |
| 120 | default y if DEBUG_KERNEL | 120 | default y if DEBUG_KERNEL |
| 121 | help | 121 | help |
| 122 | Enable the kernel tracing infrastructure. | 122 | Enable the kernel tracing infrastructure. |
| 123 | 123 | ||
| 124 | if FTRACE | 124 | if FTRACE |
| 125 | 125 | ||
| @@ -133,7 +133,7 @@ config FUNCTION_TRACER | |||
| 133 | help | 133 | help |
| 134 | Enable the kernel to trace every kernel function. This is done | 134 | Enable the kernel to trace every kernel function. This is done |
| 135 | by using a compiler feature to insert a small, 5-byte No-Operation | 135 | by using a compiler feature to insert a small, 5-byte No-Operation |
| 136 | instruction to the beginning of every kernel function, which NOP | 136 | instruction at the beginning of every kernel function, which NOP |
| 137 | sequence is then dynamically patched into a tracer call when | 137 | sequence is then dynamically patched into a tracer call when |
| 138 | tracing is enabled by the administrator. If it's runtime disabled | 138 | tracing is enabled by the administrator. If it's runtime disabled |
| 139 | (the bootup default), then the overhead of the instructions is very | 139 | (the bootup default), then the overhead of the instructions is very |
| @@ -150,7 +150,7 @@ config FUNCTION_GRAPH_TRACER | |||
| 150 | and its entry. | 150 | and its entry. |
| 151 | Its first purpose is to trace the duration of functions and | 151 | Its first purpose is to trace the duration of functions and |
| 152 | draw a call graph for each thread with some information like | 152 | draw a call graph for each thread with some information like |
| 153 | the return value. This is done by setting the current return | 153 | the return value. This is done by setting the current return |
| 154 | address on the current task structure into a stack of calls. | 154 | address on the current task structure into a stack of calls. |
| 155 | 155 | ||
| 156 | 156 | ||
| @@ -173,7 +173,7 @@ config IRQSOFF_TRACER | |||
| 173 | 173 | ||
| 174 | echo 0 > /sys/kernel/debug/tracing/tracing_max_latency | 174 | echo 0 > /sys/kernel/debug/tracing/tracing_max_latency |
| 175 | 175 | ||
| 176 | (Note that kernel size and overhead increases with this option | 176 | (Note that kernel size and overhead increase with this option |
| 177 | enabled. This option and the preempt-off timing option can be | 177 | enabled. This option and the preempt-off timing option can be |
| 178 | used together or separately.) | 178 | used together or separately.) |
| 179 | 179 | ||
| @@ -186,7 +186,7 @@ config PREEMPT_TRACER | |||
| 186 | select TRACER_MAX_TRACE | 186 | select TRACER_MAX_TRACE |
| 187 | select RING_BUFFER_ALLOW_SWAP | 187 | select RING_BUFFER_ALLOW_SWAP |
| 188 | help | 188 | help |
| 189 | This option measures the time spent in preemption off critical | 189 | This option measures the time spent in preemption-off critical |
| 190 | sections, with microsecond accuracy. | 190 | sections, with microsecond accuracy. |
| 191 | 191 | ||
| 192 | The default measurement method is a maximum search, which is | 192 | The default measurement method is a maximum search, which is |
| @@ -195,7 +195,7 @@ config PREEMPT_TRACER | |||
| 195 | 195 | ||
| 196 | echo 0 > /sys/kernel/debug/tracing/tracing_max_latency | 196 | echo 0 > /sys/kernel/debug/tracing/tracing_max_latency |
| 197 | 197 | ||
| 198 | (Note that kernel size and overhead increases with this option | 198 | (Note that kernel size and overhead increase with this option |
| 199 | enabled. This option and the irqs-off timing option can be | 199 | enabled. This option and the irqs-off timing option can be |
| 200 | used together or separately.) | 200 | used together or separately.) |
| 201 | 201 | ||
| @@ -222,7 +222,7 @@ config ENABLE_DEFAULT_TRACERS | |||
| 222 | depends on !GENERIC_TRACER | 222 | depends on !GENERIC_TRACER |
| 223 | select TRACING | 223 | select TRACING |
| 224 | help | 224 | help |
| 225 | This tracer hooks to various trace points in the kernel | 225 | This tracer hooks to various trace points in the kernel, |
| 226 | allowing the user to pick and choose which trace point they | 226 | allowing the user to pick and choose which trace point they |
| 227 | want to trace. It also includes the sched_switch tracer plugin. | 227 | want to trace. It also includes the sched_switch tracer plugin. |
| 228 | 228 | ||
| @@ -265,19 +265,19 @@ choice | |||
| 265 | The likely/unlikely profiler only looks at the conditions that | 265 | The likely/unlikely profiler only looks at the conditions that |
| 266 | are annotated with a likely or unlikely macro. | 266 | are annotated with a likely or unlikely macro. |
| 267 | 267 | ||
| 268 | The "all branch" profiler will profile every if statement in the | 268 | The "all branch" profiler will profile every if-statement in the |
| 269 | kernel. This profiler will also enable the likely/unlikely | 269 | kernel. This profiler will also enable the likely/unlikely |
| 270 | profiler as well. | 270 | profiler. |
| 271 | 271 | ||
| 272 | Either of the above profilers add a bit of overhead to the system. | 272 | Either of the above profilers adds a bit of overhead to the system. |
| 273 | If unsure choose "No branch profiling". | 273 | If unsure, choose "No branch profiling". |
| 274 | 274 | ||
| 275 | config BRANCH_PROFILE_NONE | 275 | config BRANCH_PROFILE_NONE |
| 276 | bool "No branch profiling" | 276 | bool "No branch profiling" |
| 277 | help | 277 | help |
| 278 | No branch profiling. Branch profiling adds a bit of overhead. | 278 | No branch profiling. Branch profiling adds a bit of overhead. |
| 279 | Only enable it if you want to analyse the branching behavior. | 279 | Only enable it if you want to analyse the branching behavior. |
| 280 | Otherwise keep it disabled. | 280 | Otherwise keep it disabled. |
| 281 | 281 | ||
| 282 | config PROFILE_ANNOTATED_BRANCHES | 282 | config PROFILE_ANNOTATED_BRANCHES |
| 283 | bool "Trace likely/unlikely profiler" | 283 | bool "Trace likely/unlikely profiler" |
| @@ -288,7 +288,7 @@ config PROFILE_ANNOTATED_BRANCHES | |||
| 288 | 288 | ||
| 289 | /sys/kernel/debug/tracing/profile_annotated_branch | 289 | /sys/kernel/debug/tracing/profile_annotated_branch |
| 290 | 290 | ||
| 291 | Note: this will add a significant overhead, only turn this | 291 | Note: this will add a significant overhead; only turn this |
| 292 | on if you need to profile the system's use of these macros. | 292 | on if you need to profile the system's use of these macros. |
| 293 | 293 | ||
| 294 | config PROFILE_ALL_BRANCHES | 294 | config PROFILE_ALL_BRANCHES |
| @@ -305,7 +305,7 @@ config PROFILE_ALL_BRANCHES | |||
| 305 | 305 | ||
| 306 | This configuration, when enabled, will impose a great overhead | 306 | This configuration, when enabled, will impose a great overhead |
| 307 | on the system. This should only be enabled when the system | 307 | on the system. This should only be enabled when the system |
| 308 | is to be analyzed | 308 | is to be analyzed in much detail. |
| 309 | endchoice | 309 | endchoice |
| 310 | 310 | ||
| 311 | config TRACING_BRANCHES | 311 | config TRACING_BRANCHES |
| @@ -335,7 +335,7 @@ config POWER_TRACER | |||
| 335 | depends on X86 | 335 | depends on X86 |
| 336 | select GENERIC_TRACER | 336 | select GENERIC_TRACER |
| 337 | help | 337 | help |
| 338 | This tracer helps developers to analyze and optimize the kernels | 338 | This tracer helps developers to analyze and optimize the kernel's |
| 339 | power management decisions, specifically the C-state and P-state | 339 | power management decisions, specifically the C-state and P-state |
| 340 | behavior. | 340 | behavior. |
| 341 | 341 | ||
| @@ -391,14 +391,14 @@ config HW_BRANCH_TRACER | |||
| 391 | select GENERIC_TRACER | 391 | select GENERIC_TRACER |
| 392 | help | 392 | help |
| 393 | This tracer records all branches on the system in a circular | 393 | This tracer records all branches on the system in a circular |
| 394 | buffer giving access to the last N branches for each cpu. | 394 | buffer, giving access to the last N branches for each cpu. |
| 395 | 395 | ||
| 396 | config KMEMTRACE | 396 | config KMEMTRACE |
| 397 | bool "Trace SLAB allocations" | 397 | bool "Trace SLAB allocations" |
| 398 | select GENERIC_TRACER | 398 | select GENERIC_TRACER |
| 399 | help | 399 | help |
| 400 | kmemtrace provides tracing for slab allocator functions, such as | 400 | kmemtrace provides tracing for slab allocator functions, such as |
| 401 | kmalloc, kfree, kmem_cache_alloc, kmem_cache_free etc.. Collected | 401 | kmalloc, kfree, kmem_cache_alloc, kmem_cache_free, etc. Collected |
| 402 | data is then fed to the userspace application in order to analyse | 402 | data is then fed to the userspace application in order to analyse |
| 403 | allocation hotspots, internal fragmentation and so on, making it | 403 | allocation hotspots, internal fragmentation and so on, making it |
| 404 | possible to see how well an allocator performs, as well as debug | 404 | possible to see how well an allocator performs, as well as debug |
| @@ -417,15 +417,15 @@ config WORKQUEUE_TRACER | |||
| 417 | bool "Trace workqueues" | 417 | bool "Trace workqueues" |
| 418 | select GENERIC_TRACER | 418 | select GENERIC_TRACER |
| 419 | help | 419 | help |
| 420 | The workqueue tracer provides some statistical informations | 420 | The workqueue tracer provides some statistical information |
| 421 | about each cpu workqueue thread such as the number of the | 421 | about each cpu workqueue thread such as the number of the |
| 422 | works inserted and executed since their creation. It can help | 422 | works inserted and executed since their creation. It can help |
| 423 | to evaluate the amount of work each of them have to perform. | 423 | to evaluate the amount of work each of them has to perform. |
| 424 | For example it can help a developer to decide whether he should | 424 | For example it can help a developer to decide whether he should |
| 425 | choose a per cpu workqueue instead of a singlethreaded one. | 425 | choose a per-cpu workqueue instead of a singlethreaded one. |
| 426 | 426 | ||
| 427 | config BLK_DEV_IO_TRACE | 427 | config BLK_DEV_IO_TRACE |
| 428 | bool "Support for tracing block io actions" | 428 | bool "Support for tracing block IO actions" |
| 429 | depends on SYSFS | 429 | depends on SYSFS |
| 430 | depends on BLOCK | 430 | depends on BLOCK |
| 431 | select RELAY | 431 | select RELAY |
| @@ -456,15 +456,15 @@ config KPROBE_EVENT | |||
| 456 | select TRACING | 456 | select TRACING |
| 457 | default y | 457 | default y |
| 458 | help | 458 | help |
| 459 | This allows the user to add tracing events (similar to tracepoints) on the fly | 459 | This allows the user to add tracing events (similar to tracepoints) |
| 460 | via the ftrace interface. See Documentation/trace/kprobetrace.txt | 460 | on the fly via the ftrace interface. See |
| 461 | for more details. | 461 | Documentation/trace/kprobetrace.txt for more details. |
| 462 | 462 | ||
| 463 | Those events can be inserted wherever kprobes can probe, and record | 463 | Those events can be inserted wherever kprobes can probe, and record |
| 464 | various register and memory values. | 464 | various register and memory values. |
| 465 | 465 | ||
| 466 | This option is also required by perf-probe subcommand of perf tools. If | 466 | This option is also required by perf-probe subcommand of perf tools. |
| 467 | you want to use perf tools, this option is strongly recommended. | 467 | If you want to use perf tools, this option is strongly recommended. |
| 468 | 468 | ||
| 469 | config DYNAMIC_FTRACE | 469 | config DYNAMIC_FTRACE |
| 470 | bool "enable/disable ftrace tracepoints dynamically" | 470 | bool "enable/disable ftrace tracepoints dynamically" |
| @@ -472,32 +472,32 @@ config DYNAMIC_FTRACE | |||
| 472 | depends on HAVE_DYNAMIC_FTRACE | 472 | depends on HAVE_DYNAMIC_FTRACE |
| 473 | default y | 473 | default y |
| 474 | help | 474 | help |
| 475 | This option will modify all the calls to ftrace dynamically | 475 | This option will modify all the calls to ftrace dynamically |
| 476 | (will patch them out of the binary image and replaces them | 476 | (will patch them out of the binary image and replace them |
| 477 | with a No-Op instruction) as they are called. A table is | 477 | with a No-Op instruction) as they are called. A table is |
| 478 | created to dynamically enable them again. | 478 | created to dynamically enable them again. |
| 479 | 479 | ||
| 480 | This way a CONFIG_FUNCTION_TRACER kernel is slightly larger, but otherwise | 480 | This way a CONFIG_FUNCTION_TRACER kernel is slightly larger, but |
| 481 | has native performance as long as no tracing is active. | 481 | otherwise has native performance as long as no tracing is active. |
| 482 | 482 | ||
| 483 | The changes to the code are done by a kernel thread that | 483 | The changes to the code are done by a kernel thread that |
| 484 | wakes up once a second and checks to see if any ftrace calls | 484 | wakes up once a second and checks to see if any ftrace calls |
| 485 | were made. If so, it runs stop_machine (stops all CPUS) | 485 | were made. If so, it runs stop_machine (stops all CPUS) |
| 486 | and modifies the code to jump over the call to ftrace. | 486 | and modifies the code to jump over the call to ftrace. |
| 487 | 487 | ||
| 488 | config FUNCTION_PROFILER | 488 | config FUNCTION_PROFILER |
| 489 | bool "Kernel function profiler" | 489 | bool "Kernel function profiler" |
| 490 | depends on FUNCTION_TRACER | 490 | depends on FUNCTION_TRACER |
| 491 | default n | 491 | default n |
| 492 | help | 492 | help |
| 493 | This option enables the kernel function profiler. A file is created | 493 | This option enables the kernel function profiler. A file is created |
| 494 | in debugfs called function_profile_enabled which defaults to zero. | 494 | in debugfs called function_profile_enabled which defaults to zero. |
| 495 | When a 1 is echoed into this file profiling begins, and when a | 495 | When a 1 is echoed into this file profiling begins, and when a |
| 496 | zero is entered, profiling stops. A file in the trace_stats | 496 | zero is entered, profiling stops. A "functions" file is created in |
| 497 | directory called functions, that show the list of functions that | 497 | the trace_stats directory; this file shows the list of functions that |
| 498 | have been hit and their counters. | 498 | have been hit and their counters. |
| 499 | 499 | ||
| 500 | If in doubt, say N | 500 | If in doubt, say N. |
| 501 | 501 | ||
| 502 | config FTRACE_MCOUNT_RECORD | 502 | config FTRACE_MCOUNT_RECORD |
| 503 | def_bool y | 503 | def_bool y |
| @@ -556,8 +556,8 @@ config RING_BUFFER_BENCHMARK | |||
| 556 | tristate "Ring buffer benchmark stress tester" | 556 | tristate "Ring buffer benchmark stress tester" |
| 557 | depends on RING_BUFFER | 557 | depends on RING_BUFFER |
| 558 | help | 558 | help |
| 559 | This option creates a test to stress the ring buffer and bench mark it. | 559 | This option creates a test to stress the ring buffer and benchmark it. |
| 560 | It creates its own ring buffer such that it will not interfer with | 560 | It creates its own ring buffer such that it will not interfere with |
| 561 | any other users of the ring buffer (such as ftrace). It then creates | 561 | any other users of the ring buffer (such as ftrace). It then creates |
| 562 | a producer and consumer that will run for 10 seconds and sleep for | 562 | a producer and consumer that will run for 10 seconds and sleep for |
| 563 | 10 seconds. Each interval it will print out the number of events | 563 | 10 seconds. Each interval it will print out the number of events |
| @@ -566,7 +566,7 @@ config RING_BUFFER_BENCHMARK | |||
| 566 | It does not disable interrupts or raise its priority, so it may be | 566 | It does not disable interrupts or raise its priority, so it may be |
| 567 | affected by processes that are running. | 567 | affected by processes that are running. |
| 568 | 568 | ||
| 569 | If unsure, say N | 569 | If unsure, say N. |
| 570 | 570 | ||
| 571 | endif # FTRACE | 571 | endif # FTRACE |
| 572 | 572 | ||
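
The FUNCTION_PROFILER help text above describes a purely file-based interface: write 1 to function_profile_enabled to start profiling, write 0 to stop, then read the per-function hit counts from the stats files. Below is a minimal userspace sketch of that workflow. The paths assume debugfs is mounted at /sys/kernel/debug (matching the other help texts in this Kconfig), and the trace_stat/function0 file name is an assumption about where the per-CPU counters show up, not something this patch defines.

```c
/*
 * Minimal sketch of driving the function profiler described in the
 * FUNCTION_PROFILER help text above.  Assumes debugfs is mounted at
 * /sys/kernel/debug and that per-CPU statistics appear under
 * trace_stat/function<N>; adjust paths for your system.  Run as root.
 */
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

#define TRACING "/sys/kernel/debug/tracing/"

static void write_str(const char *path, const char *val)
{
	FILE *f = fopen(path, "w");

	if (!f) {
		perror(path);
		exit(EXIT_FAILURE);
	}
	fputs(val, f);
	fclose(f);
}

int main(void)
{
	char line[256];
	FILE *stats;

	write_str(TRACING "function_profile_enabled", "1"); /* start profiling */
	sleep(5);                                           /* let some work happen */
	write_str(TRACING "function_profile_enabled", "0"); /* stop profiling */

	/* Dump the per-function hit counters gathered for CPU 0. */
	stats = fopen(TRACING "trace_stat/function0", "r");
	if (!stats) {
		perror(TRACING "trace_stat/function0");
		return EXIT_FAILURE;
	}
	while (fgets(line, sizeof(line), stats))
		fputs(line, stdout);
	fclose(stats);

	return 0;
}
```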
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c index e51a1bcb7bed..1e6640f80454 100644 --- a/kernel/trace/ftrace.c +++ b/kernel/trace/ftrace.c
| @@ -1690,7 +1690,7 @@ ftrace_regex_lseek(struct file *file, loff_t offset, int origin) | |||
| 1690 | static int ftrace_match(char *str, char *regex, int len, int type) | 1690 | static int ftrace_match(char *str, char *regex, int len, int type) |
| 1691 | { | 1691 | { |
| 1692 | int matched = 0; | 1692 | int matched = 0; |
| 1693 | char *ptr; | 1693 | int slen; |
| 1694 | 1694 | ||
| 1695 | switch (type) { | 1695 | switch (type) { |
| 1696 | case MATCH_FULL: | 1696 | case MATCH_FULL: |
| @@ -1706,8 +1706,8 @@ static int ftrace_match(char *str, char *regex, int len, int type) | |||
| 1706 | matched = 1; | 1706 | matched = 1; |
| 1707 | break; | 1707 | break; |
| 1708 | case MATCH_END_ONLY: | 1708 | case MATCH_END_ONLY: |
| 1709 | ptr = strstr(str, regex); | 1709 | slen = strlen(str); |
| 1710 | if (ptr && (ptr[len] == 0)) | 1710 | if (slen >= len && memcmp(str + slen - len, regex, len) == 0) |
| 1711 | matched = 1; | 1711 | matched = 1; |
| 1712 | break; | 1712 | break; |
| 1713 | } | 1713 | } |
| @@ -1724,7 +1724,7 @@ ftrace_match_record(struct dyn_ftrace *rec, char *regex, int len, int type) | |||
| 1724 | return ftrace_match(str, regex, len, type); | 1724 | return ftrace_match(str, regex, len, type); |
| 1725 | } | 1725 | } |
| 1726 | 1726 | ||
| 1727 | static void ftrace_match_records(char *buff, int len, int enable) | 1727 | static int ftrace_match_records(char *buff, int len, int enable) |
| 1728 | { | 1728 | { |
| 1729 | unsigned int search_len; | 1729 | unsigned int search_len; |
| 1730 | struct ftrace_page *pg; | 1730 | struct ftrace_page *pg; |
| @@ -1733,6 +1733,7 @@ static void ftrace_match_records(char *buff, int len, int enable) | |||
| 1733 | char *search; | 1733 | char *search; |
| 1734 | int type; | 1734 | int type; |
| 1735 | int not; | 1735 | int not; |
| 1736 | int found = 0; | ||
| 1736 | 1737 | ||
| 1737 | flag = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE; | 1738 | flag = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE; |
| 1738 | type = filter_parse_regex(buff, len, &search, ¬); | 1739 | type = filter_parse_regex(buff, len, &search, ¬); |
| @@ -1750,6 +1751,7 @@ static void ftrace_match_records(char *buff, int len, int enable) | |||
| 1750 | rec->flags &= ~flag; | 1751 | rec->flags &= ~flag; |
| 1751 | else | 1752 | else |
| 1752 | rec->flags |= flag; | 1753 | rec->flags |= flag; |
| 1754 | found = 1; | ||
| 1753 | } | 1755 | } |
| 1754 | /* | 1756 | /* |
| 1755 | * Only enable filtering if we have a function that | 1757 | * Only enable filtering if we have a function that |
| @@ -1759,6 +1761,8 @@ static void ftrace_match_records(char *buff, int len, int enable) | |||
| 1759 | ftrace_filtered = 1; | 1761 | ftrace_filtered = 1; |
| 1760 | } while_for_each_ftrace_rec(); | 1762 | } while_for_each_ftrace_rec(); |
| 1761 | mutex_unlock(&ftrace_lock); | 1763 | mutex_unlock(&ftrace_lock); |
| 1764 | |||
| 1765 | return found; | ||
| 1762 | } | 1766 | } |
| 1763 | 1767 | ||
| 1764 | static int | 1768 | static int |
| @@ -1780,7 +1784,7 @@ ftrace_match_module_record(struct dyn_ftrace *rec, char *mod, | |||
| 1780 | return 1; | 1784 | return 1; |
| 1781 | } | 1785 | } |
| 1782 | 1786 | ||
| 1783 | static void ftrace_match_module_records(char *buff, char *mod, int enable) | 1787 | static int ftrace_match_module_records(char *buff, char *mod, int enable) |
| 1784 | { | 1788 | { |
| 1785 | unsigned search_len = 0; | 1789 | unsigned search_len = 0; |
| 1786 | struct ftrace_page *pg; | 1790 | struct ftrace_page *pg; |
| @@ -1789,6 +1793,7 @@ static void ftrace_match_module_records(char *buff, char *mod, int enable) | |||
| 1789 | char *search = buff; | 1793 | char *search = buff; |
| 1790 | unsigned long flag; | 1794 | unsigned long flag; |
| 1791 | int not = 0; | 1795 | int not = 0; |
| 1796 | int found = 0; | ||
| 1792 | 1797 | ||
| 1793 | flag = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE; | 1798 | flag = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE; |
| 1794 | 1799 | ||
| @@ -1819,12 +1824,15 @@ static void ftrace_match_module_records(char *buff, char *mod, int enable) | |||
| 1819 | rec->flags &= ~flag; | 1824 | rec->flags &= ~flag; |
| 1820 | else | 1825 | else |
| 1821 | rec->flags |= flag; | 1826 | rec->flags |= flag; |
| 1827 | found = 1; | ||
| 1822 | } | 1828 | } |
| 1823 | if (enable && (rec->flags & FTRACE_FL_FILTER)) | 1829 | if (enable && (rec->flags & FTRACE_FL_FILTER)) |
| 1824 | ftrace_filtered = 1; | 1830 | ftrace_filtered = 1; |
| 1825 | 1831 | ||
| 1826 | } while_for_each_ftrace_rec(); | 1832 | } while_for_each_ftrace_rec(); |
| 1827 | mutex_unlock(&ftrace_lock); | 1833 | mutex_unlock(&ftrace_lock); |
| 1834 | |||
| 1835 | return found; | ||
| 1828 | } | 1836 | } |
| 1829 | 1837 | ||
| 1830 | /* | 1838 | /* |
| @@ -1853,8 +1861,9 @@ ftrace_mod_callback(char *func, char *cmd, char *param, int enable) | |||
| 1853 | if (!strlen(mod)) | 1861 | if (!strlen(mod)) |
| 1854 | return -EINVAL; | 1862 | return -EINVAL; |
| 1855 | 1863 | ||
| 1856 | ftrace_match_module_records(func, mod, enable); | 1864 | if (ftrace_match_module_records(func, mod, enable)) |
| 1857 | return 0; | 1865 | return 0; |
| 1866 | return -EINVAL; | ||
| 1858 | } | 1867 | } |
| 1859 | 1868 | ||
| 1860 | static struct ftrace_func_command ftrace_mod_cmd = { | 1869 | static struct ftrace_func_command ftrace_mod_cmd = { |
| @@ -2151,8 +2160,9 @@ static int ftrace_process_regex(char *buff, int len, int enable) | |||
| 2151 | func = strsep(&next, ":"); | 2160 | func = strsep(&next, ":"); |
| 2152 | 2161 | ||
| 2153 | if (!next) { | 2162 | if (!next) { |
| 2154 | ftrace_match_records(func, len, enable); | 2163 | if (ftrace_match_records(func, len, enable)) |
| 2155 | return 0; | 2164 | return 0; |
| 2165 | return ret; | ||
| 2156 | } | 2166 | } |
| 2157 | 2167 | ||
| 2158 | /* command found */ | 2168 | /* command found */ |
| @@ -2198,10 +2208,9 @@ ftrace_regex_write(struct file *file, const char __user *ubuf, | |||
| 2198 | !trace_parser_cont(parser)) { | 2208 | !trace_parser_cont(parser)) { |
| 2199 | ret = ftrace_process_regex(parser->buffer, | 2209 | ret = ftrace_process_regex(parser->buffer, |
| 2200 | parser->idx, enable); | 2210 | parser->idx, enable); |
| 2211 | trace_parser_clear(parser); | ||
| 2201 | if (ret) | 2212 | if (ret) |
| 2202 | goto out_unlock; | 2213 | goto out_unlock; |
| 2203 | |||
| 2204 | trace_parser_clear(parser); | ||
| 2205 | } | 2214 | } |
| 2206 | 2215 | ||
| 2207 | ret = read; | 2216 | ret = read; |
| @@ -2543,10 +2552,9 @@ ftrace_set_func(unsigned long *array, int *idx, char *buffer) | |||
| 2543 | exists = true; | 2552 | exists = true; |
| 2544 | break; | 2553 | break; |
| 2545 | } | 2554 | } |
| 2546 | if (!exists) { | 2555 | if (!exists) |
| 2547 | array[(*idx)++] = rec->ip; | 2556 | array[(*idx)++] = rec->ip; |
| 2548 | found = 1; | 2557 | found = 1; |
| 2549 | } | ||
| 2550 | } | 2558 | } |
| 2551 | } while_for_each_ftrace_rec(); | 2559 | } while_for_each_ftrace_rec(); |
| 2552 | 2560 | ||
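
The MATCH_END_ONLY hunk above replaces a strstr()-based test with an explicit suffix comparison: strstr() stops at the first occurrence of the pattern, so a function name that contains the pattern in the middle and again at the end was wrongly rejected. Here is a small standalone sketch of the new check; the helper name and the sample strings are made up for illustration, only the strlen()/memcmp() comparison itself comes from the patch.

```c
/*
 * Sketch of the new MATCH_END_ONLY comparison from ftrace_match() above.
 * The helper name and test strings are invented; only the suffix test
 * mirrors the patch.
 */
#include <stdio.h>
#include <string.h>

static int match_end_only(const char *str, const char *regex, int len)
{
	int slen = strlen(str);

	return slen >= len && memcmp(str + slen - len, regex, len) == 0;
}

int main(void)
{
	const char *regex = "lock";
	int len = strlen(regex);

	/*
	 * Ends with "lock" but also contains it earlier: the old
	 * strstr()-based check stopped at the first hit and reported no
	 * match; the suffix check matches it.
	 */
	printf("lockdep_lock: %d\n", match_end_only("lockdep_lock", regex, len));

	/* Contains "lock" only at the start: correctly rejected. */
	printf("lock_stat:    %d\n", match_end_only("lock_stat", regex, len));

	return 0;
}
```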
diff --git a/kernel/trace/power-traces.c b/kernel/trace/power-traces.c index e06c6e3d56a3..9f4f565b01e6 100644 --- a/kernel/trace/power-traces.c +++ b/kernel/trace/power-traces.c
| @@ -14,7 +14,5 @@ | |||
| 14 | #define CREATE_TRACE_POINTS | 14 | #define CREATE_TRACE_POINTS |
| 15 | #include <trace/events/power.h> | 15 | #include <trace/events/power.h> |
| 16 | 16 | ||
| 17 | EXPORT_TRACEPOINT_SYMBOL_GPL(power_start); | ||
| 18 | EXPORT_TRACEPOINT_SYMBOL_GPL(power_end); | ||
| 19 | EXPORT_TRACEPOINT_SYMBOL_GPL(power_frequency); | 17 | EXPORT_TRACEPOINT_SYMBOL_GPL(power_frequency); |
| 20 | 18 | ||
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c index a1ca4956ab5e..edefe3b2801b 100644 --- a/kernel/trace/ring_buffer.c +++ b/kernel/trace/ring_buffer.c
| @@ -423,7 +423,7 @@ struct ring_buffer_per_cpu { | |||
| 423 | int cpu; | 423 | int cpu; |
| 424 | struct ring_buffer *buffer; | 424 | struct ring_buffer *buffer; |
| 425 | spinlock_t reader_lock; /* serialize readers */ | 425 | spinlock_t reader_lock; /* serialize readers */ |
| 426 | raw_spinlock_t lock; | 426 | arch_spinlock_t lock; |
| 427 | struct lock_class_key lock_key; | 427 | struct lock_class_key lock_key; |
| 428 | struct list_head *pages; | 428 | struct list_head *pages; |
| 429 | struct buffer_page *head_page; /* read from head */ | 429 | struct buffer_page *head_page; /* read from head */ |
| @@ -998,7 +998,7 @@ rb_allocate_cpu_buffer(struct ring_buffer *buffer, int cpu) | |||
| 998 | cpu_buffer->buffer = buffer; | 998 | cpu_buffer->buffer = buffer; |
| 999 | spin_lock_init(&cpu_buffer->reader_lock); | 999 | spin_lock_init(&cpu_buffer->reader_lock); |
| 1000 | lockdep_set_class(&cpu_buffer->reader_lock, buffer->reader_lock_key); | 1000 | lockdep_set_class(&cpu_buffer->reader_lock, buffer->reader_lock_key); |
| 1001 | cpu_buffer->lock = (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED; | 1001 | cpu_buffer->lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED; |
| 1002 | 1002 | ||
| 1003 | bpage = kzalloc_node(ALIGN(sizeof(*bpage), cache_line_size()), | 1003 | bpage = kzalloc_node(ALIGN(sizeof(*bpage), cache_line_size()), |
| 1004 | GFP_KERNEL, cpu_to_node(cpu)); | 1004 | GFP_KERNEL, cpu_to_node(cpu)); |
| @@ -1193,9 +1193,6 @@ rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned nr_pages) | |||
| 1193 | struct list_head *p; | 1193 | struct list_head *p; |
| 1194 | unsigned i; | 1194 | unsigned i; |
| 1195 | 1195 | ||
| 1196 | atomic_inc(&cpu_buffer->record_disabled); | ||
| 1197 | synchronize_sched(); | ||
| 1198 | |||
| 1199 | spin_lock_irq(&cpu_buffer->reader_lock); | 1196 | spin_lock_irq(&cpu_buffer->reader_lock); |
| 1200 | rb_head_page_deactivate(cpu_buffer); | 1197 | rb_head_page_deactivate(cpu_buffer); |
| 1201 | 1198 | ||
| @@ -1211,12 +1208,9 @@ rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned nr_pages) | |||
| 1211 | return; | 1208 | return; |
| 1212 | 1209 | ||
| 1213 | rb_reset_cpu(cpu_buffer); | 1210 | rb_reset_cpu(cpu_buffer); |
| 1214 | spin_unlock_irq(&cpu_buffer->reader_lock); | ||
| 1215 | |||
| 1216 | rb_check_pages(cpu_buffer); | 1211 | rb_check_pages(cpu_buffer); |
| 1217 | 1212 | ||
| 1218 | atomic_dec(&cpu_buffer->record_disabled); | 1213 | spin_unlock_irq(&cpu_buffer->reader_lock); |
| 1219 | |||
| 1220 | } | 1214 | } |
| 1221 | 1215 | ||
| 1222 | static void | 1216 | static void |
| @@ -1227,9 +1221,6 @@ rb_insert_pages(struct ring_buffer_per_cpu *cpu_buffer, | |||
| 1227 | struct list_head *p; | 1221 | struct list_head *p; |
| 1228 | unsigned i; | 1222 | unsigned i; |
| 1229 | 1223 | ||
| 1230 | atomic_inc(&cpu_buffer->record_disabled); | ||
| 1231 | synchronize_sched(); | ||
| 1232 | |||
| 1233 | spin_lock_irq(&cpu_buffer->reader_lock); | 1224 | spin_lock_irq(&cpu_buffer->reader_lock); |
| 1234 | rb_head_page_deactivate(cpu_buffer); | 1225 | rb_head_page_deactivate(cpu_buffer); |
| 1235 | 1226 | ||
| @@ -1242,11 +1233,9 @@ rb_insert_pages(struct ring_buffer_per_cpu *cpu_buffer, | |||
| 1242 | list_add_tail(&bpage->list, cpu_buffer->pages); | 1233 | list_add_tail(&bpage->list, cpu_buffer->pages); |
| 1243 | } | 1234 | } |
| 1244 | rb_reset_cpu(cpu_buffer); | 1235 | rb_reset_cpu(cpu_buffer); |
| 1245 | spin_unlock_irq(&cpu_buffer->reader_lock); | ||
| 1246 | |||
| 1247 | rb_check_pages(cpu_buffer); | 1236 | rb_check_pages(cpu_buffer); |
| 1248 | 1237 | ||
| 1249 | atomic_dec(&cpu_buffer->record_disabled); | 1238 | spin_unlock_irq(&cpu_buffer->reader_lock); |
| 1250 | } | 1239 | } |
| 1251 | 1240 | ||
| 1252 | /** | 1241 | /** |
| @@ -1254,11 +1243,6 @@ rb_insert_pages(struct ring_buffer_per_cpu *cpu_buffer, | |||
| 1254 | * @buffer: the buffer to resize. | 1243 | * @buffer: the buffer to resize. |
| 1255 | * @size: the new size. | 1244 | * @size: the new size. |
| 1256 | * | 1245 | * |
| 1257 | * The tracer is responsible for making sure that the buffer is | ||
| 1258 | * not being used while changing the size. | ||
| 1259 | * Note: We may be able to change the above requirement by using | ||
| 1260 | * RCU synchronizations. | ||
| 1261 | * | ||
| 1262 | * Minimum size is 2 * BUF_PAGE_SIZE. | 1246 | * Minimum size is 2 * BUF_PAGE_SIZE. |
| 1263 | * | 1247 | * |
| 1264 | * Returns -1 on failure. | 1248 | * Returns -1 on failure. |
| @@ -1290,6 +1274,11 @@ int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size) | |||
| 1290 | if (size == buffer_size) | 1274 | if (size == buffer_size) |
| 1291 | return size; | 1275 | return size; |
| 1292 | 1276 | ||
| 1277 | atomic_inc(&buffer->record_disabled); | ||
| 1278 | |||
| 1279 | /* Make sure all writers are done with this buffer. */ | ||
| 1280 | synchronize_sched(); | ||
| 1281 | |||
| 1293 | mutex_lock(&buffer->mutex); | 1282 | mutex_lock(&buffer->mutex); |
| 1294 | get_online_cpus(); | 1283 | get_online_cpus(); |
| 1295 | 1284 | ||
| @@ -1352,6 +1341,8 @@ int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size) | |||
| 1352 | put_online_cpus(); | 1341 | put_online_cpus(); |
| 1353 | mutex_unlock(&buffer->mutex); | 1342 | mutex_unlock(&buffer->mutex); |
| 1354 | 1343 | ||
| 1344 | atomic_dec(&buffer->record_disabled); | ||
| 1345 | |||
| 1355 | return size; | 1346 | return size; |
| 1356 | 1347 | ||
| 1357 | free_pages: | 1348 | free_pages: |
| @@ -1361,6 +1352,7 @@ int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size) | |||
| 1361 | } | 1352 | } |
| 1362 | put_online_cpus(); | 1353 | put_online_cpus(); |
| 1363 | mutex_unlock(&buffer->mutex); | 1354 | mutex_unlock(&buffer->mutex); |
| 1355 | atomic_dec(&buffer->record_disabled); | ||
| 1364 | return -ENOMEM; | 1356 | return -ENOMEM; |
| 1365 | 1357 | ||
| 1366 | /* | 1358 | /* |
| @@ -1370,6 +1362,7 @@ int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size) | |||
| 1370 | out_fail: | 1362 | out_fail: |
| 1371 | put_online_cpus(); | 1363 | put_online_cpus(); |
| 1372 | mutex_unlock(&buffer->mutex); | 1364 | mutex_unlock(&buffer->mutex); |
| 1365 | atomic_dec(&buffer->record_disabled); | ||
| 1373 | return -1; | 1366 | return -1; |
| 1374 | } | 1367 | } |
| 1375 | EXPORT_SYMBOL_GPL(ring_buffer_resize); | 1368 | EXPORT_SYMBOL_GPL(ring_buffer_resize); |
| @@ -2834,7 +2827,7 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer) | |||
| 2834 | int ret; | 2827 | int ret; |
| 2835 | 2828 | ||
| 2836 | local_irq_save(flags); | 2829 | local_irq_save(flags); |
| 2837 | __raw_spin_lock(&cpu_buffer->lock); | 2830 | arch_spin_lock(&cpu_buffer->lock); |
| 2838 | 2831 | ||
| 2839 | again: | 2832 | again: |
| 2840 | /* | 2833 | /* |
| @@ -2876,7 +2869,7 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer) | |||
| 2876 | * Splice the empty reader page into the list around the head. | 2869 | * Splice the empty reader page into the list around the head. |
| 2877 | */ | 2870 | */ |
| 2878 | reader = rb_set_head_page(cpu_buffer); | 2871 | reader = rb_set_head_page(cpu_buffer); |
| 2879 | cpu_buffer->reader_page->list.next = reader->list.next; | 2872 | cpu_buffer->reader_page->list.next = rb_list_head(reader->list.next); |
| 2880 | cpu_buffer->reader_page->list.prev = reader->list.prev; | 2873 | cpu_buffer->reader_page->list.prev = reader->list.prev; |
| 2881 | 2874 | ||
| 2882 | /* | 2875 | /* |
| @@ -2913,7 +2906,7 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer) | |||
| 2913 | * | 2906 | * |
| 2914 | * Now make the new head point back to the reader page. | 2907 | * Now make the new head point back to the reader page. |
| 2915 | */ | 2908 | */ |
| 2916 | reader->list.next->prev = &cpu_buffer->reader_page->list; | 2909 | rb_list_head(reader->list.next)->prev = &cpu_buffer->reader_page->list; |
| 2917 | rb_inc_page(cpu_buffer, &cpu_buffer->head_page); | 2910 | rb_inc_page(cpu_buffer, &cpu_buffer->head_page); |
| 2918 | 2911 | ||
| 2919 | /* Finally update the reader page to the new head */ | 2912 | /* Finally update the reader page to the new head */ |
| @@ -2923,7 +2916,7 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer) | |||
| 2923 | goto again; | 2916 | goto again; |
| 2924 | 2917 | ||
| 2925 | out: | 2918 | out: |
| 2926 | __raw_spin_unlock(&cpu_buffer->lock); | 2919 | arch_spin_unlock(&cpu_buffer->lock); |
| 2927 | local_irq_restore(flags); | 2920 | local_irq_restore(flags); |
| 2928 | 2921 | ||
| 2929 | return reader; | 2922 | return reader; |
| @@ -3286,9 +3279,9 @@ ring_buffer_read_start(struct ring_buffer *buffer, int cpu) | |||
| 3286 | synchronize_sched(); | 3279 | synchronize_sched(); |
| 3287 | 3280 | ||
| 3288 | spin_lock_irqsave(&cpu_buffer->reader_lock, flags); | 3281 | spin_lock_irqsave(&cpu_buffer->reader_lock, flags); |
| 3289 | __raw_spin_lock(&cpu_buffer->lock); | 3282 | arch_spin_lock(&cpu_buffer->lock); |
| 3290 | rb_iter_reset(iter); | 3283 | rb_iter_reset(iter); |
| 3291 | __raw_spin_unlock(&cpu_buffer->lock); | 3284 | arch_spin_unlock(&cpu_buffer->lock); |
| 3292 | spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); | 3285 | spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); |
| 3293 | 3286 | ||
| 3294 | return iter; | 3287 | return iter; |
| @@ -3408,11 +3401,11 @@ void ring_buffer_reset_cpu(struct ring_buffer *buffer, int cpu) | |||
| 3408 | if (RB_WARN_ON(cpu_buffer, local_read(&cpu_buffer->committing))) | 3401 | if (RB_WARN_ON(cpu_buffer, local_read(&cpu_buffer->committing))) |
| 3409 | goto out; | 3402 | goto out; |
| 3410 | 3403 | ||
| 3411 | __raw_spin_lock(&cpu_buffer->lock); | 3404 | arch_spin_lock(&cpu_buffer->lock); |
| 3412 | 3405 | ||
| 3413 | rb_reset_cpu(cpu_buffer); | 3406 | rb_reset_cpu(cpu_buffer); |
| 3414 | 3407 | ||
| 3415 | __raw_spin_unlock(&cpu_buffer->lock); | 3408 | arch_spin_unlock(&cpu_buffer->lock); |
| 3416 | 3409 | ||
| 3417 | out: | 3410 | out: |
| 3418 | spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); | 3411 | spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); |
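
Taken together, the ring_buffer.c hunks above hoist the writer quiescing out of rb_remove_pages()/rb_insert_pages() and into ring_buffer_resize() itself, so recording is disabled and synchronize_sched() is paid once per resize rather than once per per-CPU helper, and the reader_lock alone covers the page-list surgery. A condensed view of the resulting ordering is sketched below; it is not compilable on its own, and everything shown is lifted from the hunks above with allocation, validation and error unwinding omitted.

```c
/*
 * Condensed restatement of the new ring_buffer_resize() ordering from the
 * hunks above.  Allocation, size checks and error unwinding are omitted;
 * this is a sketch, not a standalone function.
 */
int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size)
{
	/* ... size validation as before ... */

	atomic_inc(&buffer->record_disabled);

	/* Make sure all writers are done with this buffer. */
	synchronize_sched();

	mutex_lock(&buffer->mutex);
	get_online_cpus();

	/*
	 * rb_remove_pages()/rb_insert_pages() now only take the per-CPU
	 * reader_lock; they no longer toggle record_disabled or call
	 * synchronize_sched() themselves.
	 */

	put_online_cpus();
	mutex_unlock(&buffer->mutex);

	atomic_dec(&buffer->record_disabled);

	return size;
}
```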
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c index c82dfd92fdfd..0df1b0f2cb9e 100644 --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c
| @@ -12,7 +12,7 @@ | |||
| 12 | * Copyright (C) 2004 William Lee Irwin III | 12 | * Copyright (C) 2004 William Lee Irwin III |
| 13 | */ | 13 | */ |
| 14 | #include <linux/ring_buffer.h> | 14 | #include <linux/ring_buffer.h> |
| 15 | #include <linux/utsrelease.h> | 15 | #include <generated/utsrelease.h> |
| 16 | #include <linux/stacktrace.h> | 16 | #include <linux/stacktrace.h> |
| 17 | #include <linux/writeback.h> | 17 | #include <linux/writeback.h> |
| 18 | #include <linux/kallsyms.h> | 18 | #include <linux/kallsyms.h> |
| @@ -313,7 +313,6 @@ static const char *trace_options[] = { | |||
| 313 | "bin", | 313 | "bin", |
| 314 | "block", | 314 | "block", |
| 315 | "stacktrace", | 315 | "stacktrace", |
| 316 | "sched-tree", | ||
| 317 | "trace_printk", | 316 | "trace_printk", |
| 318 | "ftrace_preempt", | 317 | "ftrace_preempt", |
| 319 | "branch", | 318 | "branch", |
| @@ -493,15 +492,15 @@ static ssize_t trace_seq_to_buffer(struct trace_seq *s, void *buf, size_t cnt) | |||
| 493 | * protected by per_cpu spinlocks. But the action of the swap | 492 | * protected by per_cpu spinlocks. But the action of the swap |
| 494 | * needs its own lock. | 493 | * needs its own lock. |
| 495 | * | 494 | * |
| 496 | * This is defined as a raw_spinlock_t in order to help | 495 | * This is defined as a arch_spinlock_t in order to help |
| 497 | * with performance when lockdep debugging is enabled. | 496 | * with performance when lockdep debugging is enabled. |
| 498 | * | 497 | * |
| 499 | * It is also used in other places outside the update_max_tr | 498 | * It is also used in other places outside the update_max_tr |
| 500 | * so it needs to be defined outside of the | 499 | * so it needs to be defined outside of the |
| 501 | * CONFIG_TRACER_MAX_TRACE. | 500 | * CONFIG_TRACER_MAX_TRACE. |
| 502 | */ | 501 | */ |
| 503 | static raw_spinlock_t ftrace_max_lock = | 502 | static arch_spinlock_t ftrace_max_lock = |
| 504 | (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED; | 503 | (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED; |
| 505 | 504 | ||
| 506 | #ifdef CONFIG_TRACER_MAX_TRACE | 505 | #ifdef CONFIG_TRACER_MAX_TRACE |
| 507 | unsigned long __read_mostly tracing_max_latency; | 506 | unsigned long __read_mostly tracing_max_latency; |
| @@ -555,13 +554,13 @@ update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu) | |||
| 555 | return; | 554 | return; |
| 556 | 555 | ||
| 557 | WARN_ON_ONCE(!irqs_disabled()); | 556 | WARN_ON_ONCE(!irqs_disabled()); |
| 558 | __raw_spin_lock(&ftrace_max_lock); | 557 | arch_spin_lock(&ftrace_max_lock); |
| 559 | 558 | ||
| 560 | tr->buffer = max_tr.buffer; | 559 | tr->buffer = max_tr.buffer; |
| 561 | max_tr.buffer = buf; | 560 | max_tr.buffer = buf; |
| 562 | 561 | ||
| 563 | __update_max_tr(tr, tsk, cpu); | 562 | __update_max_tr(tr, tsk, cpu); |
| 564 | __raw_spin_unlock(&ftrace_max_lock); | 563 | arch_spin_unlock(&ftrace_max_lock); |
| 565 | } | 564 | } |
| 566 | 565 | ||
| 567 | /** | 566 | /** |
| @@ -581,7 +580,7 @@ update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu) | |||
| 581 | return; | 580 | return; |
| 582 | 581 | ||
| 583 | WARN_ON_ONCE(!irqs_disabled()); | 582 | WARN_ON_ONCE(!irqs_disabled()); |
| 584 | __raw_spin_lock(&ftrace_max_lock); | 583 | arch_spin_lock(&ftrace_max_lock); |
| 585 | 584 | ||
| 586 | ftrace_disable_cpu(); | 585 | ftrace_disable_cpu(); |
| 587 | 586 | ||
| @@ -603,7 +602,7 @@ update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu) | |||
| 603 | WARN_ON_ONCE(ret && ret != -EAGAIN && ret != -EBUSY); | 602 | WARN_ON_ONCE(ret && ret != -EAGAIN && ret != -EBUSY); |
| 604 | 603 | ||
| 605 | __update_max_tr(tr, tsk, cpu); | 604 | __update_max_tr(tr, tsk, cpu); |
| 606 | __raw_spin_unlock(&ftrace_max_lock); | 605 | arch_spin_unlock(&ftrace_max_lock); |
| 607 | } | 606 | } |
| 608 | #endif /* CONFIG_TRACER_MAX_TRACE */ | 607 | #endif /* CONFIG_TRACER_MAX_TRACE */ |
| 609 | 608 | ||
| @@ -802,7 +801,7 @@ static unsigned map_pid_to_cmdline[PID_MAX_DEFAULT+1]; | |||
| 802 | static unsigned map_cmdline_to_pid[SAVED_CMDLINES]; | 801 | static unsigned map_cmdline_to_pid[SAVED_CMDLINES]; |
| 803 | static char saved_cmdlines[SAVED_CMDLINES][TASK_COMM_LEN]; | 802 | static char saved_cmdlines[SAVED_CMDLINES][TASK_COMM_LEN]; |
| 804 | static int cmdline_idx; | 803 | static int cmdline_idx; |
| 805 | static raw_spinlock_t trace_cmdline_lock = __RAW_SPIN_LOCK_UNLOCKED; | 804 | static arch_spinlock_t trace_cmdline_lock = __ARCH_SPIN_LOCK_UNLOCKED; |
| 806 | 805 | ||
| 807 | /* temporary disable recording */ | 806 | /* temporary disable recording */ |
| 808 | static atomic_t trace_record_cmdline_disabled __read_mostly; | 807 | static atomic_t trace_record_cmdline_disabled __read_mostly; |
| @@ -915,7 +914,7 @@ static void trace_save_cmdline(struct task_struct *tsk) | |||
| 915 | * nor do we want to disable interrupts, | 914 | * nor do we want to disable interrupts, |
| 916 | * so if we miss here, then better luck next time. | 915 | * so if we miss here, then better luck next time. |
| 917 | */ | 916 | */ |
| 918 | if (!__raw_spin_trylock(&trace_cmdline_lock)) | 917 | if (!arch_spin_trylock(&trace_cmdline_lock)) |
| 919 | return; | 918 | return; |
| 920 | 919 | ||
| 921 | idx = map_pid_to_cmdline[tsk->pid]; | 920 | idx = map_pid_to_cmdline[tsk->pid]; |
| @@ -940,7 +939,7 @@ static void trace_save_cmdline(struct task_struct *tsk) | |||
| 940 | 939 | ||
| 941 | memcpy(&saved_cmdlines[idx], tsk->comm, TASK_COMM_LEN); | 940 | memcpy(&saved_cmdlines[idx], tsk->comm, TASK_COMM_LEN); |
| 942 | 941 | ||
| 943 | __raw_spin_unlock(&trace_cmdline_lock); | 942 | arch_spin_unlock(&trace_cmdline_lock); |
| 944 | } | 943 | } |
| 945 | 944 | ||
| 946 | void trace_find_cmdline(int pid, char comm[]) | 945 | void trace_find_cmdline(int pid, char comm[]) |
| @@ -958,14 +957,14 @@ void trace_find_cmdline(int pid, char comm[]) | |||
| 958 | } | 957 | } |
| 959 | 958 | ||
| 960 | preempt_disable(); | 959 | preempt_disable(); |
| 961 | __raw_spin_lock(&trace_cmdline_lock); | 960 | arch_spin_lock(&trace_cmdline_lock); |
| 962 | map = map_pid_to_cmdline[pid]; | 961 | map = map_pid_to_cmdline[pid]; |
| 963 | if (map != NO_CMDLINE_MAP) | 962 | if (map != NO_CMDLINE_MAP) |
| 964 | strcpy(comm, saved_cmdlines[map]); | 963 | strcpy(comm, saved_cmdlines[map]); |
| 965 | else | 964 | else |
| 966 | strcpy(comm, "<...>"); | 965 | strcpy(comm, "<...>"); |
| 967 | 966 | ||
| 968 | __raw_spin_unlock(&trace_cmdline_lock); | 967 | arch_spin_unlock(&trace_cmdline_lock); |
| 969 | preempt_enable(); | 968 | preempt_enable(); |
| 970 | } | 969 | } |
| 971 | 970 | ||
| @@ -1151,6 +1150,22 @@ void __trace_stack(struct trace_array *tr, unsigned long flags, int skip, | |||
| 1151 | __ftrace_trace_stack(tr->buffer, flags, skip, pc); | 1150 | __ftrace_trace_stack(tr->buffer, flags, skip, pc); |
| 1152 | } | 1151 | } |
| 1153 | 1152 | ||
| 1153 | /** | ||
| 1154 | * trace_dump_stack - record a stack back trace in the trace buffer | ||
| 1155 | */ | ||
| 1156 | void trace_dump_stack(void) | ||
| 1157 | { | ||
| 1158 | unsigned long flags; | ||
| 1159 | |||
| 1160 | if (tracing_disabled || tracing_selftest_running) | ||
| 1161 | return; | ||
| 1162 | |||
| 1163 | local_save_flags(flags); | ||
| 1164 | |||
| 1165 | /* skipping 3 traces, seems to get us at the caller of this function */ | ||
| 1166 | __ftrace_trace_stack(global_trace.buffer, flags, 3, preempt_count()); | ||
| 1167 | } | ||
| 1168 | |||
| 1154 | void | 1169 | void |
| 1155 | ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags, int pc) | 1170 | ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags, int pc) |
| 1156 | { | 1171 | { |
| @@ -1251,8 +1266,8 @@ ftrace_special(unsigned long arg1, unsigned long arg2, unsigned long arg3) | |||
| 1251 | */ | 1266 | */ |
| 1252 | int trace_vbprintk(unsigned long ip, const char *fmt, va_list args) | 1267 | int trace_vbprintk(unsigned long ip, const char *fmt, va_list args) |
| 1253 | { | 1268 | { |
| 1254 | static raw_spinlock_t trace_buf_lock = | 1269 | static arch_spinlock_t trace_buf_lock = |
| 1255 | (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED; | 1270 | (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED; |
| 1256 | static u32 trace_buf[TRACE_BUF_SIZE]; | 1271 | static u32 trace_buf[TRACE_BUF_SIZE]; |
| 1257 | 1272 | ||
| 1258 | struct ftrace_event_call *call = &event_bprint; | 1273 | struct ftrace_event_call *call = &event_bprint; |
| @@ -1283,7 +1298,7 @@ int trace_vbprintk(unsigned long ip, const char *fmt, va_list args) | |||
| 1283 | 1298 | ||
| 1284 | /* Lockdep uses trace_printk for lock tracing */ | 1299 | /* Lockdep uses trace_printk for lock tracing */ |
| 1285 | local_irq_save(flags); | 1300 | local_irq_save(flags); |
| 1286 | __raw_spin_lock(&trace_buf_lock); | 1301 | arch_spin_lock(&trace_buf_lock); |
| 1287 | len = vbin_printf(trace_buf, TRACE_BUF_SIZE, fmt, args); | 1302 | len = vbin_printf(trace_buf, TRACE_BUF_SIZE, fmt, args); |
| 1288 | 1303 | ||
| 1289 | if (len > TRACE_BUF_SIZE || len < 0) | 1304 | if (len > TRACE_BUF_SIZE || len < 0) |
| @@ -1304,7 +1319,7 @@ int trace_vbprintk(unsigned long ip, const char *fmt, va_list args) | |||
| 1304 | ring_buffer_unlock_commit(buffer, event); | 1319 | ring_buffer_unlock_commit(buffer, event); |
| 1305 | 1320 | ||
| 1306 | out_unlock: | 1321 | out_unlock: |
| 1307 | __raw_spin_unlock(&trace_buf_lock); | 1322 | arch_spin_unlock(&trace_buf_lock); |
| 1308 | local_irq_restore(flags); | 1323 | local_irq_restore(flags); |
| 1309 | 1324 | ||
| 1310 | out: | 1325 | out: |
| @@ -1334,7 +1349,7 @@ int trace_array_printk(struct trace_array *tr, | |||
| 1334 | int trace_array_vprintk(struct trace_array *tr, | 1349 | int trace_array_vprintk(struct trace_array *tr, |
| 1335 | unsigned long ip, const char *fmt, va_list args) | 1350 | unsigned long ip, const char *fmt, va_list args) |
| 1336 | { | 1351 | { |
| 1337 | static raw_spinlock_t trace_buf_lock = __RAW_SPIN_LOCK_UNLOCKED; | 1352 | static arch_spinlock_t trace_buf_lock = __ARCH_SPIN_LOCK_UNLOCKED; |
| 1338 | static char trace_buf[TRACE_BUF_SIZE]; | 1353 | static char trace_buf[TRACE_BUF_SIZE]; |
| 1339 | 1354 | ||
| 1340 | struct ftrace_event_call *call = &event_print; | 1355 | struct ftrace_event_call *call = &event_print; |
| @@ -1360,7 +1375,7 @@ int trace_array_vprintk(struct trace_array *tr, | |||
| 1360 | 1375 | ||
| 1361 | pause_graph_tracing(); | 1376 | pause_graph_tracing(); |
| 1362 | raw_local_irq_save(irq_flags); | 1377 | raw_local_irq_save(irq_flags); |
| 1363 | __raw_spin_lock(&trace_buf_lock); | 1378 | arch_spin_lock(&trace_buf_lock); |
| 1364 | len = vsnprintf(trace_buf, TRACE_BUF_SIZE, fmt, args); | 1379 | len = vsnprintf(trace_buf, TRACE_BUF_SIZE, fmt, args); |
| 1365 | 1380 | ||
| 1366 | size = sizeof(*entry) + len + 1; | 1381 | size = sizeof(*entry) + len + 1; |
| @@ -1378,7 +1393,7 @@ int trace_array_vprintk(struct trace_array *tr, | |||
| 1378 | ring_buffer_unlock_commit(buffer, event); | 1393 | ring_buffer_unlock_commit(buffer, event); |
| 1379 | 1394 | ||
| 1380 | out_unlock: | 1395 | out_unlock: |
| 1381 | __raw_spin_unlock(&trace_buf_lock); | 1396 | arch_spin_unlock(&trace_buf_lock); |
| 1382 | raw_local_irq_restore(irq_flags); | 1397 | raw_local_irq_restore(irq_flags); |
| 1383 | unpause_graph_tracing(); | 1398 | unpause_graph_tracing(); |
| 1384 | out: | 1399 | out: |
| @@ -2279,7 +2294,7 @@ tracing_cpumask_write(struct file *filp, const char __user *ubuf, | |||
| 2279 | mutex_lock(&tracing_cpumask_update_lock); | 2294 | mutex_lock(&tracing_cpumask_update_lock); |
| 2280 | 2295 | ||
| 2281 | local_irq_disable(); | 2296 | local_irq_disable(); |
| 2282 | __raw_spin_lock(&ftrace_max_lock); | 2297 | arch_spin_lock(&ftrace_max_lock); |
| 2283 | for_each_tracing_cpu(cpu) { | 2298 | for_each_tracing_cpu(cpu) { |
| 2284 | /* | 2299 | /* |
| 2285 | * Increase/decrease the disabled counter if we are | 2300 | * Increase/decrease the disabled counter if we are |
| @@ -2294,7 +2309,7 @@ tracing_cpumask_write(struct file *filp, const char __user *ubuf, | |||
| 2294 | atomic_dec(&global_trace.data[cpu]->disabled); | 2309 | atomic_dec(&global_trace.data[cpu]->disabled); |
| 2295 | } | 2310 | } |
| 2296 | } | 2311 | } |
| 2297 | __raw_spin_unlock(&ftrace_max_lock); | 2312 | arch_spin_unlock(&ftrace_max_lock); |
| 2298 | local_irq_enable(); | 2313 | local_irq_enable(); |
| 2299 | 2314 | ||
| 2300 | cpumask_copy(tracing_cpumask, tracing_cpumask_new); | 2315 | cpumask_copy(tracing_cpumask, tracing_cpumask_new); |
| @@ -2316,67 +2331,49 @@ static const struct file_operations tracing_cpumask_fops = { | |||
| 2316 | .write = tracing_cpumask_write, | 2331 | .write = tracing_cpumask_write, |
| 2317 | }; | 2332 | }; |
| 2318 | 2333 | ||
| 2319 | static ssize_t | 2334 | static int tracing_trace_options_show(struct seq_file *m, void *v) |
| 2320 | tracing_trace_options_read(struct file *filp, char __user *ubuf, | ||
| 2321 | size_t cnt, loff_t *ppos) | ||
| 2322 | { | 2335 | { |
| 2323 | struct tracer_opt *trace_opts; | 2336 | struct tracer_opt *trace_opts; |
| 2324 | u32 tracer_flags; | 2337 | u32 tracer_flags; |
| 2325 | int len = 0; | ||
| 2326 | char *buf; | ||
| 2327 | int r = 0; | ||
| 2328 | int i; | 2338 | int i; |
| 2329 | 2339 | ||
| 2330 | |||
| 2331 | /* calculate max size */ | ||
| 2332 | for (i = 0; trace_options[i]; i++) { | ||
| 2333 | len += strlen(trace_options[i]); | ||
| 2334 | len += 3; /* "no" and newline */ | ||
| 2335 | } | ||
| 2336 | |||
| 2337 | mutex_lock(&trace_types_lock); | 2340 | mutex_lock(&trace_types_lock); |
| 2338 | tracer_flags = current_trace->flags->val; | 2341 | tracer_flags = current_trace->flags->val; |
| 2339 | trace_opts = current_trace->flags->opts; | 2342 | trace_opts = current_trace->flags->opts; |
| 2340 | 2343 | ||
| 2341 | /* | ||
| 2342 | * Increase the size with names of options specific | ||
| 2343 | * of the current tracer. | ||
| 2344 | */ | ||
| 2345 | for (i = 0; trace_opts[i].name; i++) { | ||
| 2346 | len += strlen(trace_opts[i].name); | ||
| 2347 | len += 3; /* "no" and newline */ | ||
| 2348 | } | ||
| 2349 | |||
| 2350 | /* +1 for \0 */ | ||
| 2351 | buf = kmalloc(len + 1, GFP_KERNEL); | ||
| 2352 | if (!buf) { | ||
| 2353 | mutex_unlock(&trace_types_lock); | ||
| 2354 | return -ENOMEM; | ||
| 2355 | } | ||
| 2356 | |||
| 2357 | for (i = 0; trace_options[i]; i++) { | 2344 | for (i = 0; trace_options[i]; i++) { |
| 2358 | if (trace_flags & (1 << i)) | 2345 | if (trace_flags & (1 << i)) |
| 2359 | r += sprintf(buf + r, "%s\n", trace_options[i]); | 2346 | seq_printf(m, "%s\n", trace_options[i]); |
| 2360 | else | 2347 | else |
| 2361 | r += sprintf(buf + r, "no%s\n", trace_options[i]); | 2348 | seq_printf(m, "no%s\n", trace_options[i]); |
| 2362 | } | 2349 | } |
| 2363 | 2350 | ||
| 2364 | for (i = 0; trace_opts[i].name; i++) { | 2351 | for (i = 0; trace_opts[i].name; i++) { |
| 2365 | if (tracer_flags & trace_opts[i].bit) | 2352 | if (tracer_flags & trace_opts[i].bit) |
| 2366 | r += sprintf(buf + r, "%s\n", | 2353 | seq_printf(m, "%s\n", trace_opts[i].name); |
| 2367 | trace_opts[i].name); | ||
| 2368 | else | 2354 | else |
| 2369 | r += sprintf(buf + r, "no%s\n", | 2355 | seq_printf(m, "no%s\n", trace_opts[i].name); |
| 2370 | trace_opts[i].name); | ||
| 2371 | } | 2356 | } |
| 2372 | mutex_unlock(&trace_types_lock); | 2357 | mutex_unlock(&trace_types_lock); |
| 2373 | 2358 | ||
| 2374 | WARN_ON(r >= len + 1); | 2359 | return 0; |
| 2360 | } | ||
| 2375 | 2361 | ||
| 2376 | r = simple_read_from_buffer(ubuf, cnt, ppos, buf, r); | 2362 | static int __set_tracer_option(struct tracer *trace, |
| 2363 | struct tracer_flags *tracer_flags, | ||
| 2364 | struct tracer_opt *opts, int neg) | ||
| 2365 | { | ||
| 2366 | int ret; | ||
| 2377 | 2367 | ||
| 2378 | kfree(buf); | 2368 | ret = trace->set_flag(tracer_flags->val, opts->bit, !neg); |
| 2379 | return r; | 2369 | if (ret) |
| 2370 | return ret; | ||
| 2371 | |||
| 2372 | if (neg) | ||
| 2373 | tracer_flags->val &= ~opts->bit; | ||
| 2374 | else | ||
| 2375 | tracer_flags->val |= opts->bit; | ||
| 2376 | return 0; | ||
| 2380 | } | 2377 | } |
| 2381 | 2378 | ||
| 2382 | /* Try to assign a tracer specific option */ | 2379 | /* Try to assign a tracer specific option */ |
| @@ -2384,33 +2381,17 @@ static int set_tracer_option(struct tracer *trace, char *cmp, int neg) | |||
| 2384 | { | 2381 | { |
| 2385 | struct tracer_flags *tracer_flags = trace->flags; | 2382 | struct tracer_flags *tracer_flags = trace->flags; |
| 2386 | struct tracer_opt *opts = NULL; | 2383 | struct tracer_opt *opts = NULL; |
| 2387 | int ret = 0, i = 0; | 2384 | int i; |
| 2388 | int len; | ||
| 2389 | 2385 | ||
| 2390 | for (i = 0; tracer_flags->opts[i].name; i++) { | 2386 | for (i = 0; tracer_flags->opts[i].name; i++) { |
| 2391 | opts = &tracer_flags->opts[i]; | 2387 | opts = &tracer_flags->opts[i]; |
| 2392 | len = strlen(opts->name); | ||
| 2393 | 2388 | ||
| 2394 | if (strncmp(cmp, opts->name, len) == 0) { | 2389 | if (strcmp(cmp, opts->name) == 0) |
| 2395 | ret = trace->set_flag(tracer_flags->val, | 2390 | return __set_tracer_option(trace, trace->flags, |
| 2396 | opts->bit, !neg); | 2391 | opts, neg); |
| 2397 | break; | ||
| 2398 | } | ||
| 2399 | } | 2392 | } |
| 2400 | /* Not found */ | ||
| 2401 | if (!tracer_flags->opts[i].name) | ||
| 2402 | return -EINVAL; | ||
| 2403 | |||
| 2404 | /* Refused to handle */ | ||
| 2405 | if (ret) | ||
| 2406 | return ret; | ||
| 2407 | |||
| 2408 | if (neg) | ||
| 2409 | tracer_flags->val &= ~opts->bit; | ||
| 2410 | else | ||
| 2411 | tracer_flags->val |= opts->bit; | ||
| 2412 | 2393 | ||
| 2413 | return 0; | 2394 | return -EINVAL; |
| 2414 | } | 2395 | } |
| 2415 | 2396 | ||
| 2416 | static void set_tracer_flags(unsigned int mask, int enabled) | 2397 | static void set_tracer_flags(unsigned int mask, int enabled) |
| @@ -2430,7 +2411,7 @@ tracing_trace_options_write(struct file *filp, const char __user *ubuf, | |||
| 2430 | size_t cnt, loff_t *ppos) | 2411 | size_t cnt, loff_t *ppos) |
| 2431 | { | 2412 | { |
| 2432 | char buf[64]; | 2413 | char buf[64]; |
| 2433 | char *cmp = buf; | 2414 | char *cmp; |
| 2434 | int neg = 0; | 2415 | int neg = 0; |
| 2435 | int ret; | 2416 | int ret; |
| 2436 | int i; | 2417 | int i; |
| @@ -2442,16 +2423,15 @@ tracing_trace_options_write(struct file *filp, const char __user *ubuf, | |||
| 2442 | return -EFAULT; | 2423 | return -EFAULT; |
| 2443 | 2424 | ||
| 2444 | buf[cnt] = 0; | 2425 | buf[cnt] = 0; |
| 2426 | cmp = strstrip(buf); | ||
| 2445 | 2427 | ||
| 2446 | if (strncmp(buf, "no", 2) == 0) { | 2428 | if (strncmp(cmp, "no", 2) == 0) { |
| 2447 | neg = 1; | 2429 | neg = 1; |
| 2448 | cmp += 2; | 2430 | cmp += 2; |
| 2449 | } | 2431 | } |
| 2450 | 2432 | ||
| 2451 | for (i = 0; trace_options[i]; i++) { | 2433 | for (i = 0; trace_options[i]; i++) { |
| 2452 | int len = strlen(trace_options[i]); | 2434 | if (strcmp(cmp, trace_options[i]) == 0) { |
| 2453 | |||
| 2454 | if (strncmp(cmp, trace_options[i], len) == 0) { | ||
| 2455 | set_tracer_flags(1 << i, !neg); | 2435 | set_tracer_flags(1 << i, !neg); |
| 2456 | break; | 2436 | break; |
| 2457 | } | 2437 | } |
| @@ -2471,9 +2451,18 @@ tracing_trace_options_write(struct file *filp, const char __user *ubuf, | |||
| 2471 | return cnt; | 2451 | return cnt; |
| 2472 | } | 2452 | } |
| 2473 | 2453 | ||
| 2454 | static int tracing_trace_options_open(struct inode *inode, struct file *file) | ||
| 2455 | { | ||
| 2456 | if (tracing_disabled) | ||
| 2457 | return -ENODEV; | ||
| 2458 | return single_open(file, tracing_trace_options_show, NULL); | ||
| 2459 | } | ||
| 2460 | |||
| 2474 | static const struct file_operations tracing_iter_fops = { | 2461 | static const struct file_operations tracing_iter_fops = { |
| 2475 | .open = tracing_open_generic, | 2462 | .open = tracing_trace_options_open, |
| 2476 | .read = tracing_trace_options_read, | 2463 | .read = seq_read, |
| 2464 | .llseek = seq_lseek, | ||
| 2465 | .release = single_release, | ||
| 2477 | .write = tracing_trace_options_write, | 2466 | .write = tracing_trace_options_write, |
| 2478 | }; | 2467 | }; |
| 2479 | 2468 | ||
| @@ -3133,7 +3122,7 @@ static void tracing_spd_release_pipe(struct splice_pipe_desc *spd, | |||
| 3133 | __free_page(spd->pages[idx]); | 3122 | __free_page(spd->pages[idx]); |
| 3134 | } | 3123 | } |
| 3135 | 3124 | ||
| 3136 | static struct pipe_buf_operations tracing_pipe_buf_ops = { | 3125 | static const struct pipe_buf_operations tracing_pipe_buf_ops = { |
| 3137 | .can_merge = 0, | 3126 | .can_merge = 0, |
| 3138 | .map = generic_pipe_buf_map, | 3127 | .map = generic_pipe_buf_map, |
| 3139 | .unmap = generic_pipe_buf_unmap, | 3128 | .unmap = generic_pipe_buf_unmap, |
| @@ -3392,21 +3381,18 @@ tracing_mark_write(struct file *filp, const char __user *ubuf, | |||
| 3392 | return cnt; | 3381 | return cnt; |
| 3393 | } | 3382 | } |
| 3394 | 3383 | ||
| 3395 | static ssize_t tracing_clock_read(struct file *filp, char __user *ubuf, | 3384 | static int tracing_clock_show(struct seq_file *m, void *v) |
| 3396 | size_t cnt, loff_t *ppos) | ||
| 3397 | { | 3385 | { |
| 3398 | char buf[64]; | ||
| 3399 | int bufiter = 0; | ||
| 3400 | int i; | 3386 | int i; |
| 3401 | 3387 | ||
| 3402 | for (i = 0; i < ARRAY_SIZE(trace_clocks); i++) | 3388 | for (i = 0; i < ARRAY_SIZE(trace_clocks); i++) |
| 3403 | bufiter += snprintf(buf + bufiter, sizeof(buf) - bufiter, | 3389 | seq_printf(m, |
| 3404 | "%s%s%s%s", i ? " " : "", | 3390 | "%s%s%s%s", i ? " " : "", |
| 3405 | i == trace_clock_id ? "[" : "", trace_clocks[i].name, | 3391 | i == trace_clock_id ? "[" : "", trace_clocks[i].name, |
| 3406 | i == trace_clock_id ? "]" : ""); | 3392 | i == trace_clock_id ? "]" : ""); |
| 3407 | bufiter += snprintf(buf + bufiter, sizeof(buf) - bufiter, "\n"); | 3393 | seq_putc(m, '\n'); |
| 3408 | 3394 | ||
| 3409 | return simple_read_from_buffer(ubuf, cnt, ppos, buf, bufiter); | 3395 | return 0; |
| 3410 | } | 3396 | } |
| 3411 | 3397 | ||
| 3412 | static ssize_t tracing_clock_write(struct file *filp, const char __user *ubuf, | 3398 | static ssize_t tracing_clock_write(struct file *filp, const char __user *ubuf, |
| @@ -3448,6 +3434,13 @@ static ssize_t tracing_clock_write(struct file *filp, const char __user *ubuf, | |||
| 3448 | return cnt; | 3434 | return cnt; |
| 3449 | } | 3435 | } |
| 3450 | 3436 | ||
| 3437 | static int tracing_clock_open(struct inode *inode, struct file *file) | ||
| 3438 | { | ||
| 3439 | if (tracing_disabled) | ||
| 3440 | return -ENODEV; | ||
| 3441 | return single_open(file, tracing_clock_show, NULL); | ||
| 3442 | } | ||
| 3443 | |||
| 3451 | static const struct file_operations tracing_max_lat_fops = { | 3444 | static const struct file_operations tracing_max_lat_fops = { |
| 3452 | .open = tracing_open_generic, | 3445 | .open = tracing_open_generic, |
| 3453 | .read = tracing_max_lat_read, | 3446 | .read = tracing_max_lat_read, |
| @@ -3486,8 +3479,10 @@ static const struct file_operations tracing_mark_fops = { | |||
| 3486 | }; | 3479 | }; |
| 3487 | 3480 | ||
| 3488 | static const struct file_operations trace_clock_fops = { | 3481 | static const struct file_operations trace_clock_fops = { |
| 3489 | .open = tracing_open_generic, | 3482 | .open = tracing_clock_open, |
| 3490 | .read = tracing_clock_read, | 3483 | .read = seq_read, |
| 3484 | .llseek = seq_lseek, | ||
| 3485 | .release = single_release, | ||
| 3491 | .write = tracing_clock_write, | 3486 | .write = tracing_clock_write, |
| 3492 | }; | 3487 | }; |
| 3493 | 3488 | ||
| @@ -3617,7 +3612,7 @@ static void buffer_pipe_buf_get(struct pipe_inode_info *pipe, | |||
| 3617 | } | 3612 | } |
| 3618 | 3613 | ||
| 3619 | /* Pipe buffer operations for a buffer. */ | 3614 | /* Pipe buffer operations for a buffer. */ |
| 3620 | static struct pipe_buf_operations buffer_pipe_buf_ops = { | 3615 | static const struct pipe_buf_operations buffer_pipe_buf_ops = { |
| 3621 | .can_merge = 0, | 3616 | .can_merge = 0, |
| 3622 | .map = generic_pipe_buf_map, | 3617 | .map = generic_pipe_buf_map, |
| 3623 | .unmap = generic_pipe_buf_unmap, | 3618 | .unmap = generic_pipe_buf_unmap, |
| @@ -3948,39 +3943,16 @@ trace_options_write(struct file *filp, const char __user *ubuf, size_t cnt, | |||
| 3948 | if (ret < 0) | 3943 | if (ret < 0) |
| 3949 | return ret; | 3944 | return ret; |
| 3950 | 3945 | ||
| 3951 | ret = 0; | 3946 | if (val != 0 && val != 1) |
| 3952 | switch (val) { | 3947 | return -EINVAL; |
| 3953 | case 0: | ||
| 3954 | /* do nothing if already cleared */ | ||
| 3955 | if (!(topt->flags->val & topt->opt->bit)) | ||
| 3956 | break; | ||
| 3957 | |||
| 3958 | mutex_lock(&trace_types_lock); | ||
| 3959 | if (current_trace->set_flag) | ||
| 3960 | ret = current_trace->set_flag(topt->flags->val, | ||
| 3961 | topt->opt->bit, 0); | ||
| 3962 | mutex_unlock(&trace_types_lock); | ||
| 3963 | if (ret) | ||
| 3964 | return ret; | ||
| 3965 | topt->flags->val &= ~topt->opt->bit; | ||
| 3966 | break; | ||
| 3967 | case 1: | ||
| 3968 | /* do nothing if already set */ | ||
| 3969 | if (topt->flags->val & topt->opt->bit) | ||
| 3970 | break; | ||
| 3971 | 3948 | ||
| 3949 | if (!!(topt->flags->val & topt->opt->bit) != val) { | ||
| 3972 | mutex_lock(&trace_types_lock); | 3950 | mutex_lock(&trace_types_lock); |
| 3973 | if (current_trace->set_flag) | 3951 | ret = __set_tracer_option(current_trace, topt->flags, |
| 3974 | ret = current_trace->set_flag(topt->flags->val, | 3952 | topt->opt, !val); |
| 3975 | topt->opt->bit, 1); | ||
| 3976 | mutex_unlock(&trace_types_lock); | 3953 | mutex_unlock(&trace_types_lock); |
| 3977 | if (ret) | 3954 | if (ret) |
| 3978 | return ret; | 3955 | return ret; |
| 3979 | topt->flags->val |= topt->opt->bit; | ||
| 3980 | break; | ||
| 3981 | |||
| 3982 | default: | ||
| 3983 | return -EINVAL; | ||
| 3984 | } | 3956 | } |
| 3985 | 3957 | ||
| 3986 | *ppos += cnt; | 3958 | *ppos += cnt; |
| @@ -4307,8 +4279,8 @@ trace_printk_seq(struct trace_seq *s) | |||
| 4307 | 4279 | ||
| 4308 | static void __ftrace_dump(bool disable_tracing) | 4280 | static void __ftrace_dump(bool disable_tracing) |
| 4309 | { | 4281 | { |
| 4310 | static raw_spinlock_t ftrace_dump_lock = | 4282 | static arch_spinlock_t ftrace_dump_lock = |
| 4311 | (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED; | 4283 | (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED; |
| 4312 | /* use static because iter can be a bit big for the stack */ | 4284 | /* use static because iter can be a bit big for the stack */ |
| 4313 | static struct trace_iterator iter; | 4285 | static struct trace_iterator iter; |
| 4314 | unsigned int old_userobj; | 4286 | unsigned int old_userobj; |
| @@ -4318,7 +4290,7 @@ static void __ftrace_dump(bool disable_tracing) | |||
| 4318 | 4290 | ||
| 4319 | /* only one dump */ | 4291 | /* only one dump */ |
| 4320 | local_irq_save(flags); | 4292 | local_irq_save(flags); |
| 4321 | __raw_spin_lock(&ftrace_dump_lock); | 4293 | arch_spin_lock(&ftrace_dump_lock); |
| 4322 | if (dump_ran) | 4294 | if (dump_ran) |
| 4323 | goto out; | 4295 | goto out; |
| 4324 | 4296 | ||
| @@ -4393,7 +4365,7 @@ static void __ftrace_dump(bool disable_tracing) | |||
| 4393 | } | 4365 | } |
| 4394 | 4366 | ||
| 4395 | out: | 4367 | out: |
| 4396 | __raw_spin_unlock(&ftrace_dump_lock); | 4368 | arch_spin_unlock(&ftrace_dump_lock); |
| 4397 | local_irq_restore(flags); | 4369 | local_irq_restore(flags); |
| 4398 | } | 4370 | } |
| 4399 | 4371 | ||
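The trace.c hunks above replace the hand-rolled kmalloc/sprintf read buffers for the trace_options and trace_clock files with the seq_file helpers: a *_show() callback, single_open() in the open handler, and seq_read/seq_lseek/single_release in the file_operations. A minimal sketch of that pattern, with placeholder names rather than the tracer's own (assumes <linux/fs.h> and <linux/seq_file.h>):

/* Sketch only: the single_open() read path the hunks above switch to. */
static int example_show(struct seq_file *m, void *v)
{
        /* seq_printf() grows the output as needed; no manual buffer sizing. */
        seq_printf(m, "example\n");
        return 0;
}

static int example_open(struct inode *inode, struct file *file)
{
        return single_open(file, example_show, NULL);
}

static const struct file_operations example_fops = {
        .open           = example_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};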
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h index a52bed2eedd8..4df6a77eb196 100644 --- a/kernel/trace/trace.h +++ b/kernel/trace/trace.h | |||
| @@ -597,18 +597,17 @@ enum trace_iterator_flags { | |||
| 597 | TRACE_ITER_BIN = 0x40, | 597 | TRACE_ITER_BIN = 0x40, |
| 598 | TRACE_ITER_BLOCK = 0x80, | 598 | TRACE_ITER_BLOCK = 0x80, |
| 599 | TRACE_ITER_STACKTRACE = 0x100, | 599 | TRACE_ITER_STACKTRACE = 0x100, |
| 600 | TRACE_ITER_SCHED_TREE = 0x200, | 600 | TRACE_ITER_PRINTK = 0x200, |
| 601 | TRACE_ITER_PRINTK = 0x400, | 601 | TRACE_ITER_PREEMPTONLY = 0x400, |
| 602 | TRACE_ITER_PREEMPTONLY = 0x800, | 602 | TRACE_ITER_BRANCH = 0x800, |
| 603 | TRACE_ITER_BRANCH = 0x1000, | 603 | TRACE_ITER_ANNOTATE = 0x1000, |
| 604 | TRACE_ITER_ANNOTATE = 0x2000, | 604 | TRACE_ITER_USERSTACKTRACE = 0x2000, |
| 605 | TRACE_ITER_USERSTACKTRACE = 0x4000, | 605 | TRACE_ITER_SYM_USEROBJ = 0x4000, |
| 606 | TRACE_ITER_SYM_USEROBJ = 0x8000, | 606 | TRACE_ITER_PRINTK_MSGONLY = 0x8000, |
| 607 | TRACE_ITER_PRINTK_MSGONLY = 0x10000, | 607 | TRACE_ITER_CONTEXT_INFO = 0x10000, /* Print pid/cpu/time */ |
| 608 | TRACE_ITER_CONTEXT_INFO = 0x20000, /* Print pid/cpu/time */ | 608 | TRACE_ITER_LATENCY_FMT = 0x20000, |
| 609 | TRACE_ITER_LATENCY_FMT = 0x40000, | 609 | TRACE_ITER_SLEEP_TIME = 0x40000, |
| 610 | TRACE_ITER_SLEEP_TIME = 0x80000, | 610 | TRACE_ITER_GRAPH_TIME = 0x80000, |
| 611 | TRACE_ITER_GRAPH_TIME = 0x100000, | ||
| 612 | }; | 611 | }; |
| 613 | 612 | ||
| 614 | /* | 613 | /* |
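The trace.h hunk renumbers the TRACE_ITER_* bits once the SCHED_TREE flag is gone. Those bit positions must stay aligned with the option-name array that the trace_flags & (1 << i) loops earlier in this diff index into; a toy sketch of the pairing, with placeholder names rather than the real option strings:

/* Sketch only: the i-th name corresponds to flag bit (1 << i). */
enum {
        EXAMPLE_ITER_OPT_A = 0x1,
        EXAMPLE_ITER_OPT_B = 0x2,
};

static const char *example_options[] = {
        "opt-a",        /* 1 << 0 == EXAMPLE_ITER_OPT_A */
        "opt-b",        /* 1 << 1 == EXAMPLE_ITER_OPT_B */
        NULL
};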
diff --git a/kernel/trace/trace_clock.c b/kernel/trace/trace_clock.c index 878c03f386ba..84a3a7ba072a 100644 --- a/kernel/trace/trace_clock.c +++ b/kernel/trace/trace_clock.c | |||
| @@ -71,10 +71,10 @@ u64 notrace trace_clock(void) | |||
| 71 | /* keep prev_time and lock in the same cacheline. */ | 71 | /* keep prev_time and lock in the same cacheline. */ |
| 72 | static struct { | 72 | static struct { |
| 73 | u64 prev_time; | 73 | u64 prev_time; |
| 74 | raw_spinlock_t lock; | 74 | arch_spinlock_t lock; |
| 75 | } trace_clock_struct ____cacheline_aligned_in_smp = | 75 | } trace_clock_struct ____cacheline_aligned_in_smp = |
| 76 | { | 76 | { |
| 77 | .lock = (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED, | 77 | .lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED, |
| 78 | }; | 78 | }; |
| 79 | 79 | ||
| 80 | u64 notrace trace_clock_global(void) | 80 | u64 notrace trace_clock_global(void) |
| @@ -94,7 +94,7 @@ u64 notrace trace_clock_global(void) | |||
| 94 | if (unlikely(in_nmi())) | 94 | if (unlikely(in_nmi())) |
| 95 | goto out; | 95 | goto out; |
| 96 | 96 | ||
| 97 | __raw_spin_lock(&trace_clock_struct.lock); | 97 | arch_spin_lock(&trace_clock_struct.lock); |
| 98 | 98 | ||
| 99 | /* | 99 | /* |
| 100 | * TODO: if this happens often then maybe we should reset | 100 | * TODO: if this happens often then maybe we should reset |
| @@ -106,7 +106,7 @@ u64 notrace trace_clock_global(void) | |||
| 106 | 106 | ||
| 107 | trace_clock_struct.prev_time = now; | 107 | trace_clock_struct.prev_time = now; |
| 108 | 108 | ||
| 109 | __raw_spin_unlock(&trace_clock_struct.lock); | 109 | arch_spin_unlock(&trace_clock_struct.lock); |
| 110 | 110 | ||
| 111 | out: | 111 | out: |
| 112 | raw_local_irq_restore(flags); | 112 | raw_local_irq_restore(flags); |
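Several files in this series carry the same mechanical rename: the low-level raw_spinlock_t / __raw_spin_lock() API becomes arch_spinlock_t / arch_spin_lock(). The calling convention is unchanged, and callers still disable interrupts themselves, as in this minimal sketch of the new spelling (placeholder lock name, usual spinlock headers assumed):

static arch_spinlock_t example_lock =
        (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;

static void example_critical_section(void)
{
        unsigned long flags;

        /* arch_spin_lock() itself touches neither IRQs nor preemption. */
        local_irq_save(flags);
        arch_spin_lock(&example_lock);
        /* ... protected work ... */
        arch_spin_unlock(&example_lock);
        local_irq_restore(flags);
}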
diff --git a/kernel/trace/trace_event_profile.c b/kernel/trace/trace_event_profile.c index d9c60f80aa0d..9e25573242cf 100644 --- a/kernel/trace/trace_event_profile.c +++ b/kernel/trace/trace_event_profile.c | |||
| @@ -25,7 +25,7 @@ static int ftrace_profile_enable_event(struct ftrace_event_call *event) | |||
| 25 | char *buf; | 25 | char *buf; |
| 26 | int ret = -ENOMEM; | 26 | int ret = -ENOMEM; |
| 27 | 27 | ||
| 28 | if (atomic_inc_return(&event->profile_count)) | 28 | if (event->profile_count++ > 0) |
| 29 | return 0; | 29 | return 0; |
| 30 | 30 | ||
| 31 | if (!total_profile_count) { | 31 | if (!total_profile_count) { |
| @@ -56,7 +56,7 @@ fail_buf_nmi: | |||
| 56 | perf_trace_buf = NULL; | 56 | perf_trace_buf = NULL; |
| 57 | } | 57 | } |
| 58 | fail_buf: | 58 | fail_buf: |
| 59 | atomic_dec(&event->profile_count); | 59 | event->profile_count--; |
| 60 | 60 | ||
| 61 | return ret; | 61 | return ret; |
| 62 | } | 62 | } |
| @@ -83,7 +83,7 @@ static void ftrace_profile_disable_event(struct ftrace_event_call *event) | |||
| 83 | { | 83 | { |
| 84 | char *buf, *nmi_buf; | 84 | char *buf, *nmi_buf; |
| 85 | 85 | ||
| 86 | if (!atomic_add_negative(-1, &event->profile_count)) | 86 | if (--event->profile_count > 0) |
| 87 | return; | 87 | return; |
| 88 | 88 | ||
| 89 | event->profile_disable(event); | 89 | event->profile_disable(event); |
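The profile enable/disable hunks drop the atomic ops on profile_count in favour of a plain int, presumably because these paths are already serialized by their caller; the neighbouring total_profile_count is an ordinary int as well. A sketch of that refcounting convention with placeholder names; the "callers are serialized" requirement is an assumption the hunk itself does not state:

/* Sketch only: plain-int refcounting, valid only when callers are serialized. */
static int example_profile_refcount;

static int example_profile_enable(void)
{
        if (example_profile_refcount++ > 0)
                return 0;       /* nested enable: nothing more to do */
        /* first user: perform the real setup here */
        return 0;
}

static void example_profile_disable(void)
{
        if (--example_profile_refcount > 0)
                return;         /* still referenced */
        /* last user gone: tear down here */
}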
diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c index 1d18315dc836..189b09baf4fb 100644 --- a/kernel/trace/trace_events.c +++ b/kernel/trace/trace_events.c | |||
| @@ -78,7 +78,7 @@ EXPORT_SYMBOL_GPL(trace_define_field); | |||
| 78 | if (ret) \ | 78 | if (ret) \ |
| 79 | return ret; | 79 | return ret; |
| 80 | 80 | ||
| 81 | int trace_define_common_fields(struct ftrace_event_call *call) | 81 | static int trace_define_common_fields(struct ftrace_event_call *call) |
| 82 | { | 82 | { |
| 83 | int ret; | 83 | int ret; |
| 84 | struct trace_entry ent; | 84 | struct trace_entry ent; |
| @@ -91,7 +91,6 @@ int trace_define_common_fields(struct ftrace_event_call *call) | |||
| 91 | 91 | ||
| 92 | return ret; | 92 | return ret; |
| 93 | } | 93 | } |
| 94 | EXPORT_SYMBOL_GPL(trace_define_common_fields); | ||
| 95 | 94 | ||
| 96 | void trace_destroy_fields(struct ftrace_event_call *call) | 95 | void trace_destroy_fields(struct ftrace_event_call *call) |
| 97 | { | 96 | { |
| @@ -105,9 +104,25 @@ void trace_destroy_fields(struct ftrace_event_call *call) | |||
| 105 | } | 104 | } |
| 106 | } | 105 | } |
| 107 | 106 | ||
| 108 | static void ftrace_event_enable_disable(struct ftrace_event_call *call, | 107 | int trace_event_raw_init(struct ftrace_event_call *call) |
| 108 | { | ||
| 109 | int id; | ||
| 110 | |||
| 111 | id = register_ftrace_event(call->event); | ||
| 112 | if (!id) | ||
| 113 | return -ENODEV; | ||
| 114 | call->id = id; | ||
| 115 | INIT_LIST_HEAD(&call->fields); | ||
| 116 | |||
| 117 | return 0; | ||
| 118 | } | ||
| 119 | EXPORT_SYMBOL_GPL(trace_event_raw_init); | ||
| 120 | |||
| 121 | static int ftrace_event_enable_disable(struct ftrace_event_call *call, | ||
| 109 | int enable) | 122 | int enable) |
| 110 | { | 123 | { |
| 124 | int ret = 0; | ||
| 125 | |||
| 111 | switch (enable) { | 126 | switch (enable) { |
| 112 | case 0: | 127 | case 0: |
| 113 | if (call->enabled) { | 128 | if (call->enabled) { |
| @@ -118,12 +133,20 @@ static void ftrace_event_enable_disable(struct ftrace_event_call *call, | |||
| 118 | break; | 133 | break; |
| 119 | case 1: | 134 | case 1: |
| 120 | if (!call->enabled) { | 135 | if (!call->enabled) { |
| 121 | call->enabled = 1; | ||
| 122 | tracing_start_cmdline_record(); | 136 | tracing_start_cmdline_record(); |
| 123 | call->regfunc(call); | 137 | ret = call->regfunc(call); |
| 138 | if (ret) { | ||
| 139 | tracing_stop_cmdline_record(); | ||
| 140 | pr_info("event trace: Could not enable event " | ||
| 141 | "%s\n", call->name); | ||
| 142 | break; | ||
| 143 | } | ||
| 144 | call->enabled = 1; | ||
| 124 | } | 145 | } |
| 125 | break; | 146 | break; |
| 126 | } | 147 | } |
| 148 | |||
| 149 | return ret; | ||
| 127 | } | 150 | } |
| 128 | 151 | ||
| 129 | static void ftrace_clear_events(void) | 152 | static void ftrace_clear_events(void) |
| @@ -402,7 +425,7 @@ event_enable_write(struct file *filp, const char __user *ubuf, size_t cnt, | |||
| 402 | case 0: | 425 | case 0: |
| 403 | case 1: | 426 | case 1: |
| 404 | mutex_lock(&event_mutex); | 427 | mutex_lock(&event_mutex); |
| 405 | ftrace_event_enable_disable(call, val); | 428 | ret = ftrace_event_enable_disable(call, val); |
| 406 | mutex_unlock(&event_mutex); | 429 | mutex_unlock(&event_mutex); |
| 407 | break; | 430 | break; |
| 408 | 431 | ||
| @@ -412,7 +435,7 @@ event_enable_write(struct file *filp, const char __user *ubuf, size_t cnt, | |||
| 412 | 435 | ||
| 413 | *ppos += cnt; | 436 | *ppos += cnt; |
| 414 | 437 | ||
| 415 | return cnt; | 438 | return ret ? ret : cnt; |
| 416 | } | 439 | } |
| 417 | 440 | ||
| 418 | static ssize_t | 441 | static ssize_t |
| @@ -913,7 +936,9 @@ event_create_dir(struct ftrace_event_call *call, struct dentry *d_events, | |||
| 913 | id); | 936 | id); |
| 914 | 937 | ||
| 915 | if (call->define_fields) { | 938 | if (call->define_fields) { |
| 916 | ret = call->define_fields(call); | 939 | ret = trace_define_common_fields(call); |
| 940 | if (!ret) | ||
| 941 | ret = call->define_fields(call); | ||
| 917 | if (ret < 0) { | 942 | if (ret < 0) { |
| 918 | pr_warning("Could not initialize trace point" | 943 | pr_warning("Could not initialize trace point" |
| 919 | " events/%s\n", call->name); | 944 | " events/%s\n", call->name); |
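In trace_events.c, enabling an event can now fail: the register callback's return value is checked, cmdline recording is rolled back on error, and the error is handed back to whoever wrote to the enable file (return ret ? ret : cnt;). Condensed into a sketch, the enable-with-rollback step looks like this (placeholder function name, surrounding tracing declarations assumed):

static int example_event_enable(struct ftrace_event_call *call)
{
        int ret;

        tracing_start_cmdline_record();
        ret = call->regfunc(call);
        if (ret) {
                /* undo the step that already succeeded */
                tracing_stop_cmdline_record();
                return ret;
        }
        call->enabled = 1;
        return 0;
}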
diff --git a/kernel/trace/trace_events_filter.c b/kernel/trace/trace_events_filter.c index 50504cb228de..e42af9aad69f 100644 --- a/kernel/trace/trace_events_filter.c +++ b/kernel/trace/trace_events_filter.c | |||
| @@ -211,8 +211,9 @@ static int filter_pred_pchar(struct filter_pred *pred, void *event, | |||
| 211 | { | 211 | { |
| 212 | char **addr = (char **)(event + pred->offset); | 212 | char **addr = (char **)(event + pred->offset); |
| 213 | int cmp, match; | 213 | int cmp, match; |
| 214 | int len = strlen(*addr) + 1; /* including tailing '\0' */ | ||
| 214 | 215 | ||
| 215 | cmp = pred->regex.match(*addr, &pred->regex, pred->regex.field_len); | 216 | cmp = pred->regex.match(*addr, &pred->regex, len); |
| 216 | 217 | ||
| 217 | match = cmp ^ pred->not; | 218 | match = cmp ^ pred->not; |
| 218 | 219 | ||
| @@ -251,7 +252,18 @@ static int filter_pred_none(struct filter_pred *pred, void *event, | |||
| 251 | return 0; | 252 | return 0; |
| 252 | } | 253 | } |
| 253 | 254 | ||
| 254 | /* Basic regex callbacks */ | 255 | /* |
| 256 | * regex_match_foo - Basic regex callbacks | ||
| 257 | * | ||
| 258 | * @str: the string to be searched | ||
| 259 | * @r: the regex structure containing the pattern string | ||
| 260 | * @len: the length of the string to be searched (including '\0') | ||
| 261 | * | ||
| 262 | * Note: | ||
| 263 | * - @str might not be NULL-terminated if it's of type DYN_STRING | ||
| 264 | * or STATIC_STRING | ||
| 265 | */ | ||
| 266 | |||
| 255 | static int regex_match_full(char *str, struct regex *r, int len) | 267 | static int regex_match_full(char *str, struct regex *r, int len) |
| 256 | { | 268 | { |
| 257 | if (strncmp(str, r->pattern, len) == 0) | 269 | if (strncmp(str, r->pattern, len) == 0) |
| @@ -261,23 +273,24 @@ static int regex_match_full(char *str, struct regex *r, int len) | |||
| 261 | 273 | ||
| 262 | static int regex_match_front(char *str, struct regex *r, int len) | 274 | static int regex_match_front(char *str, struct regex *r, int len) |
| 263 | { | 275 | { |
| 264 | if (strncmp(str, r->pattern, len) == 0) | 276 | if (strncmp(str, r->pattern, r->len) == 0) |
| 265 | return 1; | 277 | return 1; |
| 266 | return 0; | 278 | return 0; |
| 267 | } | 279 | } |
| 268 | 280 | ||
| 269 | static int regex_match_middle(char *str, struct regex *r, int len) | 281 | static int regex_match_middle(char *str, struct regex *r, int len) |
| 270 | { | 282 | { |
| 271 | if (strstr(str, r->pattern)) | 283 | if (strnstr(str, r->pattern, len)) |
| 272 | return 1; | 284 | return 1; |
| 273 | return 0; | 285 | return 0; |
| 274 | } | 286 | } |
| 275 | 287 | ||
| 276 | static int regex_match_end(char *str, struct regex *r, int len) | 288 | static int regex_match_end(char *str, struct regex *r, int len) |
| 277 | { | 289 | { |
| 278 | char *ptr = strstr(str, r->pattern); | 290 | int strlen = len - 1; |
| 279 | 291 | ||
| 280 | if (ptr && (ptr[r->len] == 0)) | 292 | if (strlen >= r->len && |
| 293 | memcmp(str + strlen - r->len, r->pattern, r->len) == 0) | ||
| 281 | return 1; | 294 | return 1; |
| 282 | return 0; | 295 | return 0; |
| 283 | } | 296 | } |
| @@ -781,10 +794,8 @@ static int filter_add_pred(struct filter_parse_state *ps, | |||
| 781 | pred->regex.field_len = field->size; | 794 | pred->regex.field_len = field->size; |
| 782 | } else if (field->filter_type == FILTER_DYN_STRING) | 795 | } else if (field->filter_type == FILTER_DYN_STRING) |
| 783 | fn = filter_pred_strloc; | 796 | fn = filter_pred_strloc; |
| 784 | else { | 797 | else |
| 785 | fn = filter_pred_pchar; | 798 | fn = filter_pred_pchar; |
| 786 | pred->regex.field_len = strlen(pred->regex.pattern); | ||
| 787 | } | ||
| 788 | } else { | 799 | } else { |
| 789 | if (field->is_signed) | 800 | if (field->is_signed) |
| 790 | ret = strict_strtoll(pred->regex.pattern, 0, &val); | 801 | ret = strict_strtoll(pred->regex.pattern, 0, &val); |
diff --git a/kernel/trace/trace_export.c b/kernel/trace/trace_export.c index dff8c84ddf17..d4fa5dc1ee4e 100644 --- a/kernel/trace/trace_export.c +++ b/kernel/trace/trace_export.c | |||
| @@ -158,7 +158,8 @@ ftrace_format_##name(struct ftrace_event_call *unused, \ | |||
| 158 | BUILD_BUG_ON(len > MAX_FILTER_STR_VAL); \ | 158 | BUILD_BUG_ON(len > MAX_FILTER_STR_VAL); \ |
| 159 | ret = trace_define_field(event_call, #type "[" #len "]", #item, \ | 159 | ret = trace_define_field(event_call, #type "[" #len "]", #item, \ |
| 160 | offsetof(typeof(field), item), \ | 160 | offsetof(typeof(field), item), \ |
| 161 | sizeof(field.item), 0, FILTER_OTHER); \ | 161 | sizeof(field.item), \ |
| 162 | is_signed_type(type), FILTER_OTHER); \ | ||
| 162 | if (ret) \ | 163 | if (ret) \ |
| 163 | return ret; | 164 | return ret; |
| 164 | 165 | ||
| @@ -168,8 +169,8 @@ ftrace_format_##name(struct ftrace_event_call *unused, \ | |||
| 168 | ret = trace_define_field(event_call, #type "[" #len "]", #item, \ | 169 | ret = trace_define_field(event_call, #type "[" #len "]", #item, \ |
| 169 | offsetof(typeof(field), \ | 170 | offsetof(typeof(field), \ |
| 170 | container.item), \ | 171 | container.item), \ |
| 171 | sizeof(field.container.item), 0, \ | 172 | sizeof(field.container.item), \ |
| 172 | FILTER_OTHER); \ | 173 | is_signed_type(type), FILTER_OTHER); \ |
| 173 | if (ret) \ | 174 | if (ret) \ |
| 174 | return ret; | 175 | return ret; |
| 175 | 176 | ||
| @@ -184,10 +185,6 @@ ftrace_define_fields_##name(struct ftrace_event_call *event_call) \ | |||
| 184 | struct struct_name field; \ | 185 | struct struct_name field; \ |
| 185 | int ret; \ | 186 | int ret; \ |
| 186 | \ | 187 | \ |
| 187 | ret = trace_define_common_fields(event_call); \ | ||
| 188 | if (ret) \ | ||
| 189 | return ret; \ | ||
| 190 | \ | ||
| 191 | tstruct; \ | 188 | tstruct; \ |
| 192 | \ | 189 | \ |
| 193 | return ret; \ | 190 | return ret; \ |
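Both trace_export.c hunks start passing is_signed_type(type) where a hard-coded 0 used to be, so array fields report their signedness to the filter code. The macro itself is not part of this diff; the usual trick behind such a test is casting -1 to the type, roughly as below (the exact kernel definition is an assumption here):

/* Sketch only: 1 for signed types, 0 for unsigned ones. */
#define example_is_signed_type(type)    (((type)(-1)) < (type)1)

With this sketch, example_is_signed_type(int) evaluates to 1 and example_is_signed_type(unsigned long) to 0.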
diff --git a/kernel/trace/trace_irqsoff.c b/kernel/trace/trace_irqsoff.c index 3aa7eaa2114c..2974bc7538c7 100644 --- a/kernel/trace/trace_irqsoff.c +++ b/kernel/trace/trace_irqsoff.c | |||
| @@ -151,6 +151,8 @@ check_critical_timing(struct trace_array *tr, | |||
| 151 | goto out_unlock; | 151 | goto out_unlock; |
| 152 | 152 | ||
| 153 | trace_function(tr, CALLER_ADDR0, parent_ip, flags, pc); | 153 | trace_function(tr, CALLER_ADDR0, parent_ip, flags, pc); |
| 154 | /* Skip 5 functions to get to the irq/preempt enable function */ | ||
| 155 | __trace_stack(tr, flags, 5, pc); | ||
| 154 | 156 | ||
| 155 | if (data->critical_sequence != max_sequence) | 157 | if (data->critical_sequence != max_sequence) |
| 156 | goto out_unlock; | 158 | goto out_unlock; |
diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c index b52d397e57eb..6ea90c0e2c96 100644 --- a/kernel/trace/trace_kprobe.c +++ b/kernel/trace/trace_kprobe.c | |||
| @@ -282,6 +282,18 @@ static int kprobe_dispatcher(struct kprobe *kp, struct pt_regs *regs); | |||
| 282 | static int kretprobe_dispatcher(struct kretprobe_instance *ri, | 282 | static int kretprobe_dispatcher(struct kretprobe_instance *ri, |
| 283 | struct pt_regs *regs); | 283 | struct pt_regs *regs); |
| 284 | 284 | ||
| 285 | /* Check the name is good for event/group */ | ||
| 286 | static int check_event_name(const char *name) | ||
| 287 | { | ||
| 288 | if (!isalpha(*name) && *name != '_') | ||
| 289 | return 0; | ||
| 290 | while (*++name != '\0') { | ||
| 291 | if (!isalpha(*name) && !isdigit(*name) && *name != '_') | ||
| 292 | return 0; | ||
| 293 | } | ||
| 294 | return 1; | ||
| 295 | } | ||
| 296 | |||
| 285 | /* | 297 | /* |
| 286 | * Allocate new trace_probe and initialize it (including kprobes). | 298 | * Allocate new trace_probe and initialize it (including kprobes). |
| 287 | */ | 299 | */ |
| @@ -293,10 +305,11 @@ static struct trace_probe *alloc_trace_probe(const char *group, | |||
| 293 | int nargs, int is_return) | 305 | int nargs, int is_return) |
| 294 | { | 306 | { |
| 295 | struct trace_probe *tp; | 307 | struct trace_probe *tp; |
| 308 | int ret = -ENOMEM; | ||
| 296 | 309 | ||
| 297 | tp = kzalloc(SIZEOF_TRACE_PROBE(nargs), GFP_KERNEL); | 310 | tp = kzalloc(SIZEOF_TRACE_PROBE(nargs), GFP_KERNEL); |
| 298 | if (!tp) | 311 | if (!tp) |
| 299 | return ERR_PTR(-ENOMEM); | 312 | return ERR_PTR(ret); |
| 300 | 313 | ||
| 301 | if (symbol) { | 314 | if (symbol) { |
| 302 | tp->symbol = kstrdup(symbol, GFP_KERNEL); | 315 | tp->symbol = kstrdup(symbol, GFP_KERNEL); |
| @@ -312,14 +325,20 @@ static struct trace_probe *alloc_trace_probe(const char *group, | |||
| 312 | else | 325 | else |
| 313 | tp->rp.kp.pre_handler = kprobe_dispatcher; | 326 | tp->rp.kp.pre_handler = kprobe_dispatcher; |
| 314 | 327 | ||
| 315 | if (!event) | 328 | if (!event || !check_event_name(event)) { |
| 329 | ret = -EINVAL; | ||
| 316 | goto error; | 330 | goto error; |
| 331 | } | ||
| 332 | |||
| 317 | tp->call.name = kstrdup(event, GFP_KERNEL); | 333 | tp->call.name = kstrdup(event, GFP_KERNEL); |
| 318 | if (!tp->call.name) | 334 | if (!tp->call.name) |
| 319 | goto error; | 335 | goto error; |
| 320 | 336 | ||
| 321 | if (!group) | 337 | if (!group || !check_event_name(group)) { |
| 338 | ret = -EINVAL; | ||
| 322 | goto error; | 339 | goto error; |
| 340 | } | ||
| 341 | |||
| 323 | tp->call.system = kstrdup(group, GFP_KERNEL); | 342 | tp->call.system = kstrdup(group, GFP_KERNEL); |
| 324 | if (!tp->call.system) | 343 | if (!tp->call.system) |
| 325 | goto error; | 344 | goto error; |
| @@ -330,7 +349,7 @@ error: | |||
| 330 | kfree(tp->call.name); | 349 | kfree(tp->call.name); |
| 331 | kfree(tp->symbol); | 350 | kfree(tp->symbol); |
| 332 | kfree(tp); | 351 | kfree(tp); |
| 333 | return ERR_PTR(-ENOMEM); | 352 | return ERR_PTR(ret); |
| 334 | } | 353 | } |
| 335 | 354 | ||
| 336 | static void free_probe_arg(struct probe_arg *arg) | 355 | static void free_probe_arg(struct probe_arg *arg) |
| @@ -695,10 +714,10 @@ static int create_trace_probe(int argc, char **argv) | |||
| 695 | if (!event) { | 714 | if (!event) { |
| 696 | /* Make a new event name */ | 715 | /* Make a new event name */ |
| 697 | if (symbol) | 716 | if (symbol) |
| 698 | snprintf(buf, MAX_EVENT_NAME_LEN, "%c@%s%+ld", | 717 | snprintf(buf, MAX_EVENT_NAME_LEN, "%c_%s_%ld", |
| 699 | is_return ? 'r' : 'p', symbol, offset); | 718 | is_return ? 'r' : 'p', symbol, offset); |
| 700 | else | 719 | else |
| 701 | snprintf(buf, MAX_EVENT_NAME_LEN, "%c@0x%p", | 720 | snprintf(buf, MAX_EVENT_NAME_LEN, "%c_0x%p", |
| 702 | is_return ? 'r' : 'p', addr); | 721 | is_return ? 'r' : 'p', addr); |
| 703 | event = buf; | 722 | event = buf; |
| 704 | } | 723 | } |
| @@ -1132,10 +1151,6 @@ static int kprobe_event_define_fields(struct ftrace_event_call *event_call) | |||
| 1132 | struct kprobe_trace_entry field; | 1151 | struct kprobe_trace_entry field; |
| 1133 | struct trace_probe *tp = (struct trace_probe *)event_call->data; | 1152 | struct trace_probe *tp = (struct trace_probe *)event_call->data; |
| 1134 | 1153 | ||
| 1135 | ret = trace_define_common_fields(event_call); | ||
| 1136 | if (ret) | ||
| 1137 | return ret; | ||
| 1138 | |||
| 1139 | DEFINE_FIELD(unsigned long, ip, FIELD_STRING_IP, 0); | 1154 | DEFINE_FIELD(unsigned long, ip, FIELD_STRING_IP, 0); |
| 1140 | DEFINE_FIELD(int, nargs, FIELD_STRING_NARGS, 1); | 1155 | DEFINE_FIELD(int, nargs, FIELD_STRING_NARGS, 1); |
| 1141 | /* Set argument names as fields */ | 1156 | /* Set argument names as fields */ |
| @@ -1150,10 +1165,6 @@ static int kretprobe_event_define_fields(struct ftrace_event_call *event_call) | |||
| 1150 | struct kretprobe_trace_entry field; | 1165 | struct kretprobe_trace_entry field; |
| 1151 | struct trace_probe *tp = (struct trace_probe *)event_call->data; | 1166 | struct trace_probe *tp = (struct trace_probe *)event_call->data; |
| 1152 | 1167 | ||
| 1153 | ret = trace_define_common_fields(event_call); | ||
| 1154 | if (ret) | ||
| 1155 | return ret; | ||
| 1156 | |||
| 1157 | DEFINE_FIELD(unsigned long, func, FIELD_STRING_FUNC, 0); | 1168 | DEFINE_FIELD(unsigned long, func, FIELD_STRING_FUNC, 0); |
| 1158 | DEFINE_FIELD(unsigned long, ret_ip, FIELD_STRING_RETIP, 0); | 1169 | DEFINE_FIELD(unsigned long, ret_ip, FIELD_STRING_RETIP, 0); |
| 1159 | DEFINE_FIELD(int, nargs, FIELD_STRING_NARGS, 1); | 1170 | DEFINE_FIELD(int, nargs, FIELD_STRING_NARGS, 1); |
| @@ -1190,10 +1201,11 @@ static int __probe_event_show_format(struct trace_seq *s, | |||
| 1190 | #undef SHOW_FIELD | 1201 | #undef SHOW_FIELD |
| 1191 | #define SHOW_FIELD(type, item, name) \ | 1202 | #define SHOW_FIELD(type, item, name) \ |
| 1192 | do { \ | 1203 | do { \ |
| 1193 | ret = trace_seq_printf(s, "\tfield: " #type " %s;\t" \ | 1204 | ret = trace_seq_printf(s, "\tfield:" #type " %s;\t" \ |
| 1194 | "offset:%u;\tsize:%u;\n", name, \ | 1205 | "offset:%u;\tsize:%u;\tsigned:%d;\n", name,\ |
| 1195 | (unsigned int)offsetof(typeof(field), item),\ | 1206 | (unsigned int)offsetof(typeof(field), item),\ |
| 1196 | (unsigned int)sizeof(type)); \ | 1207 | (unsigned int)sizeof(type), \ |
| 1208 | is_signed_type(type)); \ | ||
| 1197 | if (!ret) \ | 1209 | if (!ret) \ |
| 1198 | return 0; \ | 1210 | return 0; \ |
| 1199 | } while (0) | 1211 | } while (0) |
| @@ -1453,7 +1465,6 @@ static int register_probe_event(struct trace_probe *tp) | |||
| 1453 | call->unregfunc = probe_event_disable; | 1465 | call->unregfunc = probe_event_disable; |
| 1454 | 1466 | ||
| 1455 | #ifdef CONFIG_EVENT_PROFILE | 1467 | #ifdef CONFIG_EVENT_PROFILE |
| 1456 | atomic_set(&call->profile_count, -1); | ||
| 1457 | call->profile_enable = probe_profile_enable; | 1468 | call->profile_enable = probe_profile_enable; |
| 1458 | call->profile_disable = probe_profile_disable; | 1469 | call->profile_disable = probe_profile_disable; |
| 1459 | #endif | 1470 | #endif |
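trace_kprobe.c now validates event and group names with check_event_name() and switches the auto-generated names from the '@'/'+' forms to underscores so they pass that check: a probe on, say, do_fork at offset 0 comes out as "p_do_fork_0" (the symbol is just an example). A standalone, userspace-style sketch of the new naming:

#include <stdio.h>

/* Sketch only: reproduces the "%c_%s_%ld" format so the generated name is a
 * valid C-identifier-style event name. */
static void example_event_name(char *buf, size_t len, int is_return,
                               const char *symbol, long offset)
{
        snprintf(buf, len, "%c_%s_%ld", is_return ? 'r' : 'p', symbol, offset);
}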
diff --git a/kernel/trace/trace_ksym.c b/kernel/trace/trace_ksym.c index acb87d4a4ac1..94103cdcf9d8 100644 --- a/kernel/trace/trace_ksym.c +++ b/kernel/trace/trace_ksym.c | |||
| @@ -26,12 +26,13 @@ | |||
| 26 | #include <linux/fs.h> | 26 | #include <linux/fs.h> |
| 27 | 27 | ||
| 28 | #include "trace_output.h" | 28 | #include "trace_output.h" |
| 29 | #include "trace_stat.h" | ||
| 30 | #include "trace.h" | 29 | #include "trace.h" |
| 31 | 30 | ||
| 32 | #include <linux/hw_breakpoint.h> | 31 | #include <linux/hw_breakpoint.h> |
| 33 | #include <asm/hw_breakpoint.h> | 32 | #include <asm/hw_breakpoint.h> |
| 34 | 33 | ||
| 34 | #include <asm/atomic.h> | ||
| 35 | |||
| 35 | /* | 36 | /* |
| 36 | * For now, let us restrict the no. of symbols traced simultaneously to number | 37 | * For now, let us restrict the no. of symbols traced simultaneously to number |
| 37 | * of available hardware breakpoint registers. | 38 | * of available hardware breakpoint registers. |
| @@ -44,7 +45,7 @@ struct trace_ksym { | |||
| 44 | struct perf_event **ksym_hbp; | 45 | struct perf_event **ksym_hbp; |
| 45 | struct perf_event_attr attr; | 46 | struct perf_event_attr attr; |
| 46 | #ifdef CONFIG_PROFILE_KSYM_TRACER | 47 | #ifdef CONFIG_PROFILE_KSYM_TRACER |
| 47 | unsigned long counter; | 48 | atomic64_t counter; |
| 48 | #endif | 49 | #endif |
| 49 | struct hlist_node ksym_hlist; | 50 | struct hlist_node ksym_hlist; |
| 50 | }; | 51 | }; |
| @@ -69,9 +70,8 @@ void ksym_collect_stats(unsigned long hbp_hit_addr) | |||
| 69 | 70 | ||
| 70 | rcu_read_lock(); | 71 | rcu_read_lock(); |
| 71 | hlist_for_each_entry_rcu(entry, node, &ksym_filter_head, ksym_hlist) { | 72 | hlist_for_each_entry_rcu(entry, node, &ksym_filter_head, ksym_hlist) { |
| 72 | if ((entry->attr.bp_addr == hbp_hit_addr) && | 73 | if (entry->attr.bp_addr == hbp_hit_addr) { |
| 73 | (entry->counter <= MAX_UL_INT)) { | 74 | atomic64_inc(&entry->counter); |
| 74 | entry->counter++; | ||
| 75 | break; | 75 | break; |
| 76 | } | 76 | } |
| 77 | } | 77 | } |
| @@ -197,7 +197,6 @@ int process_new_ksym_entry(char *ksymname, int op, unsigned long addr) | |||
| 197 | entry->attr.bp_addr = addr; | 197 | entry->attr.bp_addr = addr; |
| 198 | entry->attr.bp_len = HW_BREAKPOINT_LEN_4; | 198 | entry->attr.bp_len = HW_BREAKPOINT_LEN_4; |
| 199 | 199 | ||
| 200 | ret = -EAGAIN; | ||
| 201 | entry->ksym_hbp = register_wide_hw_breakpoint(&entry->attr, | 200 | entry->ksym_hbp = register_wide_hw_breakpoint(&entry->attr, |
| 202 | ksym_hbp_handler); | 201 | ksym_hbp_handler); |
| 203 | 202 | ||
| @@ -236,7 +235,8 @@ static ssize_t ksym_trace_filter_read(struct file *filp, char __user *ubuf, | |||
| 236 | mutex_lock(&ksym_tracer_mutex); | 235 | mutex_lock(&ksym_tracer_mutex); |
| 237 | 236 | ||
| 238 | hlist_for_each_entry(entry, node, &ksym_filter_head, ksym_hlist) { | 237 | hlist_for_each_entry(entry, node, &ksym_filter_head, ksym_hlist) { |
| 239 | ret = trace_seq_printf(s, "%pS:", (void *)entry->attr.bp_addr); | 238 | ret = trace_seq_printf(s, "%pS:", |
| 239 | (void *)(unsigned long)entry->attr.bp_addr); | ||
| 240 | if (entry->attr.bp_type == HW_BREAKPOINT_R) | 240 | if (entry->attr.bp_type == HW_BREAKPOINT_R) |
| 241 | ret = trace_seq_puts(s, "r--\n"); | 241 | ret = trace_seq_puts(s, "r--\n"); |
| 242 | else if (entry->attr.bp_type == HW_BREAKPOINT_W) | 242 | else if (entry->attr.bp_type == HW_BREAKPOINT_W) |
| @@ -278,21 +278,20 @@ static ssize_t ksym_trace_filter_write(struct file *file, | |||
| 278 | { | 278 | { |
| 279 | struct trace_ksym *entry; | 279 | struct trace_ksym *entry; |
| 280 | struct hlist_node *node; | 280 | struct hlist_node *node; |
| 281 | char *input_string, *ksymname = NULL; | 281 | char *buf, *input_string, *ksymname = NULL; |
| 282 | unsigned long ksym_addr = 0; | 282 | unsigned long ksym_addr = 0; |
| 283 | int ret, op, changed = 0; | 283 | int ret, op, changed = 0; |
| 284 | 284 | ||
| 285 | input_string = kzalloc(count + 1, GFP_KERNEL); | 285 | buf = kzalloc(count + 1, GFP_KERNEL); |
| 286 | if (!input_string) | 286 | if (!buf) |
| 287 | return -ENOMEM; | 287 | return -ENOMEM; |
| 288 | 288 | ||
| 289 | if (copy_from_user(input_string, buffer, count)) { | 289 | ret = -EFAULT; |
| 290 | kfree(input_string); | 290 | if (copy_from_user(buf, buffer, count)) |
| 291 | return -EFAULT; | 291 | goto out; |
| 292 | } | ||
| 293 | input_string[count] = '\0'; | ||
| 294 | 292 | ||
| 295 | strstrip(input_string); | 293 | buf[count] = '\0'; |
| 294 | input_string = strstrip(buf); | ||
| 296 | 295 | ||
| 297 | /* | 296 | /* |
| 298 | * Clear all breakpoints if: | 297 | * Clear all breakpoints if: |
| @@ -303,15 +302,13 @@ static ssize_t ksym_trace_filter_write(struct file *file, | |||
| 303 | if (!input_string[0] || !strcmp(input_string, "0") || | 302 | if (!input_string[0] || !strcmp(input_string, "0") || |
| 304 | !strcmp(input_string, "*:---")) { | 303 | !strcmp(input_string, "*:---")) { |
| 305 | __ksym_trace_reset(); | 304 | __ksym_trace_reset(); |
| 306 | kfree(input_string); | 305 | ret = 0; |
| 307 | return count; | 306 | goto out; |
| 308 | } | 307 | } |
| 309 | 308 | ||
| 310 | ret = op = parse_ksym_trace_str(input_string, &ksymname, &ksym_addr); | 309 | ret = op = parse_ksym_trace_str(input_string, &ksymname, &ksym_addr); |
| 311 | if (ret < 0) { | 310 | if (ret < 0) |
| 312 | kfree(input_string); | 311 | goto out; |
| 313 | return ret; | ||
| 314 | } | ||
| 315 | 312 | ||
| 316 | mutex_lock(&ksym_tracer_mutex); | 313 | mutex_lock(&ksym_tracer_mutex); |
| 317 | 314 | ||
| @@ -322,7 +319,7 @@ static ssize_t ksym_trace_filter_write(struct file *file, | |||
| 322 | if (entry->attr.bp_type != op) | 319 | if (entry->attr.bp_type != op) |
| 323 | changed = 1; | 320 | changed = 1; |
| 324 | else | 321 | else |
| 325 | goto out; | 322 | goto out_unlock; |
| 326 | break; | 323 | break; |
| 327 | } | 324 | } |
| 328 | } | 325 | } |
| @@ -337,28 +334,24 @@ static ssize_t ksym_trace_filter_write(struct file *file, | |||
| 337 | if (IS_ERR(entry->ksym_hbp)) | 334 | if (IS_ERR(entry->ksym_hbp)) |
| 338 | ret = PTR_ERR(entry->ksym_hbp); | 335 | ret = PTR_ERR(entry->ksym_hbp); |
| 339 | else | 336 | else |
| 340 | goto out; | 337 | goto out_unlock; |
| 341 | } | 338 | } |
| 342 | /* Error or "symbol:---" case: drop it */ | 339 | /* Error or "symbol:---" case: drop it */ |
| 343 | ksym_filter_entry_count--; | 340 | ksym_filter_entry_count--; |
| 344 | hlist_del_rcu(&(entry->ksym_hlist)); | 341 | hlist_del_rcu(&(entry->ksym_hlist)); |
| 345 | synchronize_rcu(); | 342 | synchronize_rcu(); |
| 346 | kfree(entry); | 343 | kfree(entry); |
| 347 | goto out; | 344 | goto out_unlock; |
| 348 | } else { | 345 | } else { |
| 349 | /* Check for malformed request: (4) */ | 346 | /* Check for malformed request: (4) */ |
| 350 | if (op == 0) | 347 | if (op) |
| 351 | goto out; | 348 | ret = process_new_ksym_entry(ksymname, op, ksym_addr); |
| 352 | ret = process_new_ksym_entry(ksymname, op, ksym_addr); | ||
| 353 | } | 349 | } |
| 354 | out: | 350 | out_unlock: |
| 355 | mutex_unlock(&ksym_tracer_mutex); | 351 | mutex_unlock(&ksym_tracer_mutex); |
| 356 | 352 | out: | |
| 357 | kfree(input_string); | 353 | kfree(buf); |
| 358 | 354 | return !ret ? count : ret; | |
| 359 | if (!ret) | ||
| 360 | ret = count; | ||
| 361 | return ret; | ||
| 362 | } | 355 | } |
| 363 | 356 | ||
| 364 | static const struct file_operations ksym_tracing_fops = { | 357 | static const struct file_operations ksym_tracing_fops = { |
| @@ -450,102 +443,77 @@ struct tracer ksym_tracer __read_mostly = | |||
| 450 | .print_line = ksym_trace_output | 443 | .print_line = ksym_trace_output |
| 451 | }; | 444 | }; |
| 452 | 445 | ||
| 453 | __init static int init_ksym_trace(void) | ||
| 454 | { | ||
| 455 | struct dentry *d_tracer; | ||
| 456 | struct dentry *entry; | ||
| 457 | |||
| 458 | d_tracer = tracing_init_dentry(); | ||
| 459 | ksym_filter_entry_count = 0; | ||
| 460 | |||
| 461 | entry = debugfs_create_file("ksym_trace_filter", 0644, d_tracer, | ||
| 462 | NULL, &ksym_tracing_fops); | ||
| 463 | if (!entry) | ||
| 464 | pr_warning("Could not create debugfs " | ||
| 465 | "'ksym_trace_filter' file\n"); | ||
| 466 | |||
| 467 | return register_tracer(&ksym_tracer); | ||
| 468 | } | ||
| 469 | device_initcall(init_ksym_trace); | ||
| 470 | |||
| 471 | |||
| 472 | #ifdef CONFIG_PROFILE_KSYM_TRACER | 446 | #ifdef CONFIG_PROFILE_KSYM_TRACER |
| 473 | static int ksym_tracer_stat_headers(struct seq_file *m) | 447 | static int ksym_profile_show(struct seq_file *m, void *v) |
| 474 | { | 448 | { |
| 449 | struct hlist_node *node; | ||
| 450 | struct trace_ksym *entry; | ||
| 451 | int access_type = 0; | ||
| 452 | char fn_name[KSYM_NAME_LEN]; | ||
| 453 | |||
| 475 | seq_puts(m, " Access Type "); | 454 | seq_puts(m, " Access Type "); |
| 476 | seq_puts(m, " Symbol Counter\n"); | 455 | seq_puts(m, " Symbol Counter\n"); |
| 477 | seq_puts(m, " ----------- "); | 456 | seq_puts(m, " ----------- "); |
| 478 | seq_puts(m, " ------ -------\n"); | 457 | seq_puts(m, " ------ -------\n"); |
| 479 | return 0; | ||
| 480 | } | ||
| 481 | 458 | ||
| 482 | static int ksym_tracer_stat_show(struct seq_file *m, void *v) | 459 | rcu_read_lock(); |
| 483 | { | 460 | hlist_for_each_entry_rcu(entry, node, &ksym_filter_head, ksym_hlist) { |
| 484 | struct hlist_node *stat = v; | ||
| 485 | struct trace_ksym *entry; | ||
| 486 | int access_type = 0; | ||
| 487 | char fn_name[KSYM_NAME_LEN]; | ||
| 488 | 461 | ||
| 489 | entry = hlist_entry(stat, struct trace_ksym, ksym_hlist); | 462 | access_type = entry->attr.bp_type; |
| 490 | 463 | ||
| 491 | access_type = entry->attr.bp_type; | 464 | switch (access_type) { |
| 465 | case HW_BREAKPOINT_R: | ||
| 466 | seq_puts(m, " R "); | ||
| 467 | break; | ||
| 468 | case HW_BREAKPOINT_W: | ||
| 469 | seq_puts(m, " W "); | ||
| 470 | break; | ||
| 471 | case HW_BREAKPOINT_R | HW_BREAKPOINT_W: | ||
| 472 | seq_puts(m, " RW "); | ||
| 473 | break; | ||
| 474 | default: | ||
| 475 | seq_puts(m, " NA "); | ||
| 476 | } | ||
| 492 | 477 | ||
| 493 | switch (access_type) { | 478 | if (lookup_symbol_name(entry->attr.bp_addr, fn_name) >= 0) |
| 494 | case HW_BREAKPOINT_R: | 479 | seq_printf(m, " %-36s", fn_name); |
| 495 | seq_puts(m, " R "); | 480 | else |
| 496 | break; | 481 | seq_printf(m, " %-36s", "<NA>"); |
| 497 | case HW_BREAKPOINT_W: | 482 | seq_printf(m, " %15llu\n", |
| 498 | seq_puts(m, " W "); | 483 | (unsigned long long)atomic64_read(&entry->counter)); |
| 499 | break; | ||
| 500 | case HW_BREAKPOINT_R | HW_BREAKPOINT_W: | ||
| 501 | seq_puts(m, " RW "); | ||
| 502 | break; | ||
| 503 | default: | ||
| 504 | seq_puts(m, " NA "); | ||
| 505 | } | 484 | } |
| 506 | 485 | rcu_read_unlock(); | |
| 507 | if (lookup_symbol_name(entry->attr.bp_addr, fn_name) >= 0) | ||
| 508 | seq_printf(m, " %-36s", fn_name); | ||
| 509 | else | ||
| 510 | seq_printf(m, " %-36s", "<NA>"); | ||
| 511 | seq_printf(m, " %15lu\n", entry->counter); | ||
| 512 | 486 | ||
| 513 | return 0; | 487 | return 0; |
| 514 | } | 488 | } |
| 515 | 489 | ||
| 516 | static void *ksym_tracer_stat_start(struct tracer_stat *trace) | 490 | static int ksym_profile_open(struct inode *node, struct file *file) |
| 517 | { | 491 | { |
| 518 | return ksym_filter_head.first; | 492 | return single_open(file, ksym_profile_show, NULL); |
| 519 | } | 493 | } |
| 520 | 494 | ||
| 521 | static void * | 495 | static const struct file_operations ksym_profile_fops = { |
| 522 | ksym_tracer_stat_next(void *v, int idx) | 496 | .open = ksym_profile_open, |
| 523 | { | 497 | .read = seq_read, |
| 524 | struct hlist_node *stat = v; | 498 | .llseek = seq_lseek, |
| 525 | 499 | .release = single_release, | |
| 526 | return stat->next; | ||
| 527 | } | ||
| 528 | |||
| 529 | static struct tracer_stat ksym_tracer_stats = { | ||
| 530 | .name = "ksym_tracer", | ||
| 531 | .stat_start = ksym_tracer_stat_start, | ||
| 532 | .stat_next = ksym_tracer_stat_next, | ||
| 533 | .stat_headers = ksym_tracer_stat_headers, | ||
| 534 | .stat_show = ksym_tracer_stat_show | ||
| 535 | }; | 500 | }; |
| 501 | #endif /* CONFIG_PROFILE_KSYM_TRACER */ | ||
| 536 | 502 | ||
| 537 | __init static int ksym_tracer_stat_init(void) | 503 | __init static int init_ksym_trace(void) |
| 538 | { | 504 | { |
| 539 | int ret; | 505 | struct dentry *d_tracer; |
| 540 | 506 | ||
| 541 | ret = register_stat_tracer(&ksym_tracer_stats); | 507 | d_tracer = tracing_init_dentry(); |
| 542 | if (ret) { | ||
| 543 | printk(KERN_WARNING "Warning: could not register " | ||
| 544 | "ksym tracer stats\n"); | ||
| 545 | return 1; | ||
| 546 | } | ||
| 547 | 508 | ||
| 548 | return 0; | 509 | trace_create_file("ksym_trace_filter", 0644, d_tracer, |
| 510 | NULL, &ksym_tracing_fops); | ||
| 511 | |||
| 512 | #ifdef CONFIG_PROFILE_KSYM_TRACER | ||
| 513 | trace_create_file("ksym_profile", 0444, d_tracer, | ||
| 514 | NULL, &ksym_profile_fops); | ||
| 515 | #endif | ||
| 516 | |||
| 517 | return register_tracer(&ksym_tracer); | ||
| 549 | } | 518 | } |
| 550 | fs_initcall(ksym_tracer_stat_init); | 519 | device_initcall(init_ksym_trace); |
| 551 | #endif /* CONFIG_PROFILE_KSYM_TRACER */ | ||
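The ksym hunks drop the stat-tracer plumbing in favour of a single_open seq_file ("ksym_profile") and turn the per-symbol hit counter into an atomic64_t, so the breakpoint handler can bump it without the old MAX_UL_INT clamp. A minimal sketch of that counter pattern with placeholder names (the diff's own <asm/atomic.h> include assumed):

struct example_hit_counter {
        atomic64_t counter;
};

static void example_record_hit(struct example_hit_counter *c)
{
        atomic64_inc(&c->counter);      /* safe from the breakpoint handler */
}

static unsigned long long example_read_hits(struct example_hit_counter *c)
{
        return (unsigned long long)atomic64_read(&c->counter);
}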
diff --git a/kernel/trace/trace_sched_wakeup.c b/kernel/trace/trace_sched_wakeup.c index 26185d727676..0271742abb8d 100644 --- a/kernel/trace/trace_sched_wakeup.c +++ b/kernel/trace/trace_sched_wakeup.c | |||
| @@ -28,8 +28,8 @@ static int wakeup_current_cpu; | |||
| 28 | static unsigned wakeup_prio = -1; | 28 | static unsigned wakeup_prio = -1; |
| 29 | static int wakeup_rt; | 29 | static int wakeup_rt; |
| 30 | 30 | ||
| 31 | static raw_spinlock_t wakeup_lock = | 31 | static arch_spinlock_t wakeup_lock = |
| 32 | (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED; | 32 | (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED; |
| 33 | 33 | ||
| 34 | static void __wakeup_reset(struct trace_array *tr); | 34 | static void __wakeup_reset(struct trace_array *tr); |
| 35 | 35 | ||
| @@ -143,7 +143,7 @@ probe_wakeup_sched_switch(struct rq *rq, struct task_struct *prev, | |||
| 143 | goto out; | 143 | goto out; |
| 144 | 144 | ||
| 145 | local_irq_save(flags); | 145 | local_irq_save(flags); |
| 146 | __raw_spin_lock(&wakeup_lock); | 146 | arch_spin_lock(&wakeup_lock); |
| 147 | 147 | ||
| 148 | /* We could race with grabbing wakeup_lock */ | 148 | /* We could race with grabbing wakeup_lock */ |
| 149 | if (unlikely(!tracer_enabled || next != wakeup_task)) | 149 | if (unlikely(!tracer_enabled || next != wakeup_task)) |
| @@ -169,7 +169,7 @@ probe_wakeup_sched_switch(struct rq *rq, struct task_struct *prev, | |||
| 169 | 169 | ||
| 170 | out_unlock: | 170 | out_unlock: |
| 171 | __wakeup_reset(wakeup_trace); | 171 | __wakeup_reset(wakeup_trace); |
| 172 | __raw_spin_unlock(&wakeup_lock); | 172 | arch_spin_unlock(&wakeup_lock); |
| 173 | local_irq_restore(flags); | 173 | local_irq_restore(flags); |
| 174 | out: | 174 | out: |
| 175 | atomic_dec(&wakeup_trace->data[cpu]->disabled); | 175 | atomic_dec(&wakeup_trace->data[cpu]->disabled); |
| @@ -193,9 +193,9 @@ static void wakeup_reset(struct trace_array *tr) | |||
| 193 | tracing_reset_online_cpus(tr); | 193 | tracing_reset_online_cpus(tr); |
| 194 | 194 | ||
| 195 | local_irq_save(flags); | 195 | local_irq_save(flags); |
| 196 | __raw_spin_lock(&wakeup_lock); | 196 | arch_spin_lock(&wakeup_lock); |
| 197 | __wakeup_reset(tr); | 197 | __wakeup_reset(tr); |
| 198 | __raw_spin_unlock(&wakeup_lock); | 198 | arch_spin_unlock(&wakeup_lock); |
| 199 | local_irq_restore(flags); | 199 | local_irq_restore(flags); |
| 200 | } | 200 | } |
| 201 | 201 | ||
| @@ -225,7 +225,7 @@ probe_wakeup(struct rq *rq, struct task_struct *p, int success) | |||
| 225 | goto out; | 225 | goto out; |
| 226 | 226 | ||
| 227 | /* interrupts should be off from try_to_wake_up */ | 227 | /* interrupts should be off from try_to_wake_up */ |
| 228 | __raw_spin_lock(&wakeup_lock); | 228 | arch_spin_lock(&wakeup_lock); |
| 229 | 229 | ||
| 230 | /* check for races. */ | 230 | /* check for races. */ |
| 231 | if (!tracer_enabled || p->prio >= wakeup_prio) | 231 | if (!tracer_enabled || p->prio >= wakeup_prio) |
| @@ -255,7 +255,7 @@ probe_wakeup(struct rq *rq, struct task_struct *p, int success) | |||
| 255 | trace_function(wakeup_trace, CALLER_ADDR1, CALLER_ADDR2, flags, pc); | 255 | trace_function(wakeup_trace, CALLER_ADDR1, CALLER_ADDR2, flags, pc); |
| 256 | 256 | ||
| 257 | out_locked: | 257 | out_locked: |
| 258 | __raw_spin_unlock(&wakeup_lock); | 258 | arch_spin_unlock(&wakeup_lock); |
| 259 | out: | 259 | out: |
| 260 | atomic_dec(&wakeup_trace->data[cpu]->disabled); | 260 | atomic_dec(&wakeup_trace->data[cpu]->disabled); |
| 261 | } | 261 | } |
diff --git a/kernel/trace/trace_selftest.c b/kernel/trace/trace_selftest.c index dc98309e839a..280fea470d67 100644 --- a/kernel/trace/trace_selftest.c +++ b/kernel/trace/trace_selftest.c | |||
| @@ -67,7 +67,7 @@ static int trace_test_buffer(struct trace_array *tr, unsigned long *count) | |||
| 67 | 67 | ||
| 68 | /* Don't allow flipping of max traces now */ | 68 | /* Don't allow flipping of max traces now */ |
| 69 | local_irq_save(flags); | 69 | local_irq_save(flags); |
| 70 | __raw_spin_lock(&ftrace_max_lock); | 70 | arch_spin_lock(&ftrace_max_lock); |
| 71 | 71 | ||
| 72 | cnt = ring_buffer_entries(tr->buffer); | 72 | cnt = ring_buffer_entries(tr->buffer); |
| 73 | 73 | ||
| @@ -85,7 +85,7 @@ static int trace_test_buffer(struct trace_array *tr, unsigned long *count) | |||
| 85 | break; | 85 | break; |
| 86 | } | 86 | } |
| 87 | tracing_on(); | 87 | tracing_on(); |
| 88 | __raw_spin_unlock(&ftrace_max_lock); | 88 | arch_spin_unlock(&ftrace_max_lock); |
| 89 | local_irq_restore(flags); | 89 | local_irq_restore(flags); |
| 90 | 90 | ||
| 91 | if (count) | 91 | if (count) |
diff --git a/kernel/trace/trace_stack.c b/kernel/trace/trace_stack.c index 8504ac71e4e8..678a5120ee30 100644 --- a/kernel/trace/trace_stack.c +++ b/kernel/trace/trace_stack.c | |||
| @@ -27,8 +27,8 @@ static struct stack_trace max_stack_trace = { | |||
| 27 | }; | 27 | }; |
| 28 | 28 | ||
| 29 | static unsigned long max_stack_size; | 29 | static unsigned long max_stack_size; |
| 30 | static raw_spinlock_t max_stack_lock = | 30 | static arch_spinlock_t max_stack_lock = |
| 31 | (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED; | 31 | (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED; |
| 32 | 32 | ||
| 33 | static int stack_trace_disabled __read_mostly; | 33 | static int stack_trace_disabled __read_mostly; |
| 34 | static DEFINE_PER_CPU(int, trace_active); | 34 | static DEFINE_PER_CPU(int, trace_active); |
| @@ -54,7 +54,7 @@ static inline void check_stack(void) | |||
| 54 | return; | 54 | return; |
| 55 | 55 | ||
| 56 | local_irq_save(flags); | 56 | local_irq_save(flags); |
| 57 | __raw_spin_lock(&max_stack_lock); | 57 | arch_spin_lock(&max_stack_lock); |
| 58 | 58 | ||
| 59 | /* a race could have already updated it */ | 59 | /* a race could have already updated it */ |
| 60 | if (this_size <= max_stack_size) | 60 | if (this_size <= max_stack_size) |
| @@ -103,7 +103,7 @@ static inline void check_stack(void) | |||
| 103 | } | 103 | } |
| 104 | 104 | ||
| 105 | out: | 105 | out: |
| 106 | __raw_spin_unlock(&max_stack_lock); | 106 | arch_spin_unlock(&max_stack_lock); |
| 107 | local_irq_restore(flags); | 107 | local_irq_restore(flags); |
| 108 | } | 108 | } |
| 109 | 109 | ||
| @@ -171,9 +171,9 @@ stack_max_size_write(struct file *filp, const char __user *ubuf, | |||
| 171 | return ret; | 171 | return ret; |
| 172 | 172 | ||
| 173 | local_irq_save(flags); | 173 | local_irq_save(flags); |
| 174 | __raw_spin_lock(&max_stack_lock); | 174 | arch_spin_lock(&max_stack_lock); |
| 175 | *ptr = val; | 175 | *ptr = val; |
| 176 | __raw_spin_unlock(&max_stack_lock); | 176 | arch_spin_unlock(&max_stack_lock); |
| 177 | local_irq_restore(flags); | 177 | local_irq_restore(flags); |
| 178 | 178 | ||
| 179 | return count; | 179 | return count; |
| @@ -207,7 +207,7 @@ t_next(struct seq_file *m, void *v, loff_t *pos) | |||
| 207 | static void *t_start(struct seq_file *m, loff_t *pos) | 207 | static void *t_start(struct seq_file *m, loff_t *pos) |
| 208 | { | 208 | { |
| 209 | local_irq_disable(); | 209 | local_irq_disable(); |
| 210 | __raw_spin_lock(&max_stack_lock); | 210 | arch_spin_lock(&max_stack_lock); |
| 211 | 211 | ||
| 212 | if (*pos == 0) | 212 | if (*pos == 0) |
| 213 | return SEQ_START_TOKEN; | 213 | return SEQ_START_TOKEN; |
| @@ -217,7 +217,7 @@ static void *t_start(struct seq_file *m, loff_t *pos) | |||
| 217 | 217 | ||
| 218 | static void t_stop(struct seq_file *m, void *p) | 218 | static void t_stop(struct seq_file *m, void *p) |
| 219 | { | 219 | { |
| 220 | __raw_spin_unlock(&max_stack_lock); | 220 | arch_spin_unlock(&max_stack_lock); |
| 221 | local_irq_enable(); | 221 | local_irq_enable(); |
| 222 | } | 222 | } |
| 223 | 223 | ||
diff --git a/kernel/trace/trace_syscalls.c b/kernel/trace/trace_syscalls.c index 57501d90096a..75289f372dd2 100644 --- a/kernel/trace/trace_syscalls.c +++ b/kernel/trace/trace_syscalls.c | |||
| @@ -217,10 +217,6 @@ int syscall_enter_define_fields(struct ftrace_event_call *call) | |||
| 217 | int i; | 217 | int i; |
| 218 | int offset = offsetof(typeof(trace), args); | 218 | int offset = offsetof(typeof(trace), args); |
| 219 | 219 | ||
| 220 | ret = trace_define_common_fields(call); | ||
| 221 | if (ret) | ||
| 222 | return ret; | ||
| 223 | |||
| 224 | ret = trace_define_field(call, SYSCALL_FIELD(int, nr), FILTER_OTHER); | 220 | ret = trace_define_field(call, SYSCALL_FIELD(int, nr), FILTER_OTHER); |
| 225 | if (ret) | 221 | if (ret) |
| 226 | return ret; | 222 | return ret; |
| @@ -241,10 +237,6 @@ int syscall_exit_define_fields(struct ftrace_event_call *call) | |||
| 241 | struct syscall_trace_exit trace; | 237 | struct syscall_trace_exit trace; |
| 242 | int ret; | 238 | int ret; |
| 243 | 239 | ||
| 244 | ret = trace_define_common_fields(call); | ||
| 245 | if (ret) | ||
| 246 | return ret; | ||
| 247 | |||
| 248 | ret = trace_define_field(call, SYSCALL_FIELD(int, nr), FILTER_OTHER); | 240 | ret = trace_define_field(call, SYSCALL_FIELD(int, nr), FILTER_OTHER); |
| 249 | if (ret) | 241 | if (ret) |
| 250 | return ret; | 242 | return ret; |
| @@ -333,10 +325,7 @@ int reg_event_syscall_enter(struct ftrace_event_call *call) | |||
| 333 | mutex_lock(&syscall_trace_lock); | 325 | mutex_lock(&syscall_trace_lock); |
| 334 | if (!sys_refcount_enter) | 326 | if (!sys_refcount_enter) |
| 335 | ret = register_trace_sys_enter(ftrace_syscall_enter); | 327 | ret = register_trace_sys_enter(ftrace_syscall_enter); |
| 336 | if (ret) { | 328 | if (!ret) { |
| 337 | pr_info("event trace: Could not activate" | ||
| 338 | "syscall entry trace point"); | ||
| 339 | } else { | ||
| 340 | set_bit(num, enabled_enter_syscalls); | 329 | set_bit(num, enabled_enter_syscalls); |
| 341 | sys_refcount_enter++; | 330 | sys_refcount_enter++; |
| 342 | } | 331 | } |
| @@ -370,10 +359,7 @@ int reg_event_syscall_exit(struct ftrace_event_call *call) | |||
| 370 | mutex_lock(&syscall_trace_lock); | 359 | mutex_lock(&syscall_trace_lock); |
| 371 | if (!sys_refcount_exit) | 360 | if (!sys_refcount_exit) |
| 372 | ret = register_trace_sys_exit(ftrace_syscall_exit); | 361 | ret = register_trace_sys_exit(ftrace_syscall_exit); |
| 373 | if (ret) { | 362 | if (!ret) { |
| 374 | pr_info("event trace: Could not activate" | ||
| 375 | "syscall exit trace point"); | ||
| 376 | } else { | ||
| 377 | set_bit(num, enabled_exit_syscalls); | 363 | set_bit(num, enabled_exit_syscalls); |
| 378 | sys_refcount_exit++; | 364 | sys_refcount_exit++; |
| 379 | } | 365 | } |
diff --git a/kernel/trace/trace_sysprof.c b/kernel/trace/trace_sysprof.c index f6693969287d..a7974a552ca9 100644 --- a/kernel/trace/trace_sysprof.c +++ b/kernel/trace/trace_sysprof.c | |||
| @@ -93,6 +93,7 @@ static const struct stacktrace_ops backtrace_ops = { | |||
| 93 | .warning_symbol = backtrace_warning_symbol, | 93 | .warning_symbol = backtrace_warning_symbol, |
| 94 | .stack = backtrace_stack, | 94 | .stack = backtrace_stack, |
| 95 | .address = backtrace_address, | 95 | .address = backtrace_address, |
| 96 | .walk_stack = print_context_stack, | ||
| 96 | }; | 97 | }; |
| 97 | 98 | ||
| 98 | static int | 99 | static int |
