| author | Linus Torvalds <torvalds@linux-foundation.org> | 2019-03-10 18:22:03 -0400 |
|---|---|---|
| committer | Linus Torvalds <torvalds@linux-foundation.org> | 2019-03-10 18:22:03 -0400 |
| commit | 12ad143e1b803e541e48b8ba40f550250259ecdd | |
| tree | 5202b407df21e5abaeb294d1ecddcf0a2eca7f8b | /tools/perf/scripts/python |
| parent | 262d6a9a63a387c8dfa9eb4f7713e159c941e52c | |
| parent | b339da480315505aa28a723a983217ebcff95c86 | |
Merge branch 'perf-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull perf updates from Thomas Gleixner:
"Perf updates and fixes:
Kernel:
- Handle events which have the bpf_event attribute set as side band
events as they carry information about BPF programs.
- Add missing switch-case fall-through comments
Libraries:
- Fix leaks and double frees in error code paths.
- Prevent buffer overflows in libtraceevent
Tools:
- Improvements in handling Intel PT/BTS
- Add BTF ELF markers to perf trace BPF programs to improve output
- Support --time, --cpu, --pid and --tid filters for perf diff
- Calculate the column width in perf annotate as the hardcoded 6
characters for the instruction are not sufficient
- Small fixes all over the place"
* 'perf-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (38 commits)
perf/core: Mark expected switch fall-through
perf/x86/intel/uncore: Fix client IMC events return huge result
perf/ring_buffer: Use high order allocations for AUX buffers optimistically
perf data: Force perf_data__open|close zero data->file.path
perf session: Fix double free in perf_data__close
perf evsel: Probe for precise_ip with simple attr
perf tools: Read and store caps/max_precise in perf_pmu
perf hist: Fix memory leak of srcline
perf hist: Add error path into hist_entry__init
perf c2c: Fix c2c report for empty numa node
perf script python: Add Python3 support to intel-pt-events.py
perf script python: Add Python3 support to event_analyzing_sample.py
perf script python: add Python3 support to check-perf-trace.py
perf script python: Add Python3 support to futex-contention.py
perf script python: Remove mixed indentation
perf diff: Support --pid/--tid filter options
perf diff: Support --cpu filter option
perf diff: Support --time filter option
perf thread: Generalize function to copy from thread addr space from intel-bts code
perf annotate: Calculate the max instruction name, align column to that
...
Diffstat (limited to 'tools/perf/scripts/python')
| -rw-r--r-- | tools/perf/scripts/python/check-perf-trace.py | 76 |
| -rw-r--r-- | tools/perf/scripts/python/compaction-times.py | 8 |
| -rw-r--r-- | tools/perf/scripts/python/event_analyzing_sample.py | 48 |
| -rw-r--r-- | tools/perf/scripts/python/export-to-postgresql.py | 16 |
| -rw-r--r-- | tools/perf/scripts/python/export-to-sqlite.py | 12 |
| -rwxr-xr-x | tools/perf/scripts/python/exported-sql-viewer.py | 354 |
| -rw-r--r-- | tools/perf/scripts/python/failed-syscalls-by-pid.py | 38 |
| -rw-r--r-- | tools/perf/scripts/python/futex-contention.py | 10 |
| -rw-r--r-- | tools/perf/scripts/python/intel-pt-events.py | 60 |
| -rw-r--r-- | tools/perf/scripts/python/mem-phys-addr.py | 7 |
| -rwxr-xr-x | tools/perf/scripts/python/net_dropmonitor.py | 2 |
| -rw-r--r-- | tools/perf/scripts/python/netdev-times.py | 12 |
| -rw-r--r-- | tools/perf/scripts/python/sched-migration.py | 6 |
| -rw-r--r-- | tools/perf/scripts/python/sctop.py | 13 |
| -rwxr-xr-x | tools/perf/scripts/python/stackcollapse.py | 2 |
| -rw-r--r-- | tools/perf/scripts/python/syscall-counts-by-pid.py | 47 |
| -rw-r--r-- | tools/perf/scripts/python/syscall-counts.py | 31 |
17 files changed, 484 insertions, 258 deletions
diff --git a/tools/perf/scripts/python/check-perf-trace.py b/tools/perf/scripts/python/check-perf-trace.py
index 334599c6032c..d2c22954800d 100644
--- a/tools/perf/scripts/python/check-perf-trace.py
+++ b/tools/perf/scripts/python/check-perf-trace.py
| @@ -7,6 +7,8 @@ | |||
| 7 | # events, etc. Basically, if this script runs successfully and | 7 | # events, etc. Basically, if this script runs successfully and |
| 8 | # displays expected results, Python scripting support should be ok. | 8 | # displays expected results, Python scripting support should be ok. |
| 9 | 9 | ||
| 10 | from __future__ import print_function | ||
| 11 | |||
| 10 | import os | 12 | import os |
| 11 | import sys | 13 | import sys |
| 12 | 14 | ||
| @@ -19,64 +21,64 @@ from perf_trace_context import * | |||
| 19 | unhandled = autodict() | 21 | unhandled = autodict() |
| 20 | 22 | ||
| 21 | def trace_begin(): | 23 | def trace_begin(): |
| 22 | print "trace_begin" | 24 | print("trace_begin") |
| 23 | pass | 25 | pass |
| 24 | 26 | ||
| 25 | def trace_end(): | 27 | def trace_end(): |
| 26 | print_unhandled() | 28 | print_unhandled() |
| 27 | 29 | ||
| 28 | def irq__softirq_entry(event_name, context, common_cpu, | 30 | def irq__softirq_entry(event_name, context, common_cpu, |
| 29 | common_secs, common_nsecs, common_pid, common_comm, | 31 | common_secs, common_nsecs, common_pid, common_comm, |
| 30 | common_callchain, vec): | 32 | common_callchain, vec): |
| 31 | print_header(event_name, common_cpu, common_secs, common_nsecs, | 33 | print_header(event_name, common_cpu, common_secs, common_nsecs, |
| 32 | common_pid, common_comm) | 34 | common_pid, common_comm) |
| 33 | 35 | ||
| 34 | print_uncommon(context) | 36 | print_uncommon(context) |
| 35 | 37 | ||
| 36 | print "vec=%s\n" % \ | 38 | print("vec=%s" % (symbol_str("irq__softirq_entry", "vec", vec))) |
| 37 | (symbol_str("irq__softirq_entry", "vec", vec)), | ||
| 38 | 39 | ||
| 39 | def kmem__kmalloc(event_name, context, common_cpu, | 40 | def kmem__kmalloc(event_name, context, common_cpu, |
| 40 | common_secs, common_nsecs, common_pid, common_comm, | 41 | common_secs, common_nsecs, common_pid, common_comm, |
| 41 | common_callchain, call_site, ptr, bytes_req, bytes_alloc, | 42 | common_callchain, call_site, ptr, bytes_req, bytes_alloc, |
| 42 | gfp_flags): | 43 | gfp_flags): |
| 43 | print_header(event_name, common_cpu, common_secs, common_nsecs, | 44 | print_header(event_name, common_cpu, common_secs, common_nsecs, |
| 44 | common_pid, common_comm) | 45 | common_pid, common_comm) |
| 45 | 46 | ||
| 46 | print_uncommon(context) | 47 | print_uncommon(context) |
| 47 | 48 | ||
| 48 | print "call_site=%u, ptr=%u, bytes_req=%u, " \ | 49 | print("call_site=%u, ptr=%u, bytes_req=%u, " |
| 49 | "bytes_alloc=%u, gfp_flags=%s\n" % \ | 50 | "bytes_alloc=%u, gfp_flags=%s" % |
| 50 | (call_site, ptr, bytes_req, bytes_alloc, | 51 | (call_site, ptr, bytes_req, bytes_alloc, |
| 51 | 52 | flag_str("kmem__kmalloc", "gfp_flags", gfp_flags))) | |
| 52 | flag_str("kmem__kmalloc", "gfp_flags", gfp_flags)), | ||
| 53 | 53 | ||
| 54 | def trace_unhandled(event_name, context, event_fields_dict): | 54 | def trace_unhandled(event_name, context, event_fields_dict): |
| 55 | try: | 55 | try: |
| 56 | unhandled[event_name] += 1 | 56 | unhandled[event_name] += 1 |
| 57 | except TypeError: | 57 | except TypeError: |
| 58 | unhandled[event_name] = 1 | 58 | unhandled[event_name] = 1 |
| 59 | 59 | ||
| 60 | def print_header(event_name, cpu, secs, nsecs, pid, comm): | 60 | def print_header(event_name, cpu, secs, nsecs, pid, comm): |
| 61 | print "%-20s %5u %05u.%09u %8u %-20s " % \ | 61 | print("%-20s %5u %05u.%09u %8u %-20s " % |
| 62 | (event_name, cpu, secs, nsecs, pid, comm), | 62 | (event_name, cpu, secs, nsecs, pid, comm), |
| 63 | end=' ') | ||
| 63 | 64 | ||
| 64 | # print trace fields not included in handler args | 65 | # print trace fields not included in handler args |
| 65 | def print_uncommon(context): | 66 | def print_uncommon(context): |
| 66 | print "common_preempt_count=%d, common_flags=%s, common_lock_depth=%d, " \ | 67 | print("common_preempt_count=%d, common_flags=%s, " |
| 67 | % (common_pc(context), trace_flag_str(common_flags(context)), \ | 68 | "common_lock_depth=%d, " % |
| 68 | common_lock_depth(context)) | 69 | (common_pc(context), trace_flag_str(common_flags(context)), |
| 70 | common_lock_depth(context))) | ||
| 69 | 71 | ||
| 70 | def print_unhandled(): | 72 | def print_unhandled(): |
| 71 | keys = unhandled.keys() | 73 | keys = unhandled.keys() |
| 72 | if not keys: | 74 | if not keys: |
| 73 | return | 75 | return |
| 74 | 76 | ||
| 75 | print "\nunhandled events:\n\n", | 77 | print("\nunhandled events:\n") |
| 76 | 78 | ||
| 77 | print "%-40s %10s\n" % ("event", "count"), | 79 | print("%-40s %10s" % ("event", "count")) |
| 78 | print "%-40s %10s\n" % ("----------------------------------------", \ | 80 | print("%-40s %10s" % ("----------------------------------------", |
| 79 | "-----------"), | 81 | "-----------")) |
| 80 | 82 | ||
| 81 | for event_name in keys: | 83 | for event_name in keys: |
| 82 | print "%-40s %10d\n" % (event_name, unhandled[event_name]) | 84 | print("%-40s %10d\n" % (event_name, unhandled[event_name])) |
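
The pattern in this hunk repeats across the Python3-support commits in the series: `from __future__ import print_function` makes `print()` a function under Python 2.7 as well, and `end=' '` replaces the old trailing-comma trick for suppressing the newline. A minimal, self-contained sketch of the same idiom (the sample field values are invented):

```python
# Python 2.7 / Python 3 compatible printing, as used in check-perf-trace.py.
from __future__ import print_function

def print_header(event_name, cpu, secs, nsecs, pid, comm):
    # end=' ' keeps the cursor on the same line, like the Python 2 trailing comma did.
    print("%-20s %5u %05u.%09u %8u %-20s " %
          (event_name, cpu, secs, nsecs, pid, comm), end=' ')

print_header("irq__softirq_entry", 0, 1234, 567890000, 4242, "swapper/0")
print("vec=%s" % "TIMER")  # completes the line started by print_header()
```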
diff --git a/tools/perf/scripts/python/compaction-times.py b/tools/perf/scripts/python/compaction-times.py
index 239cb0568ec3..2560a042dc6f 100644
--- a/tools/perf/scripts/python/compaction-times.py
+++ b/tools/perf/scripts/python/compaction-times.py
| @@ -216,15 +216,15 @@ def compaction__mm_compaction_migratepages(event_name, context, common_cpu, | |||
| 216 | pair(nr_migrated, nr_failed), None, None) | 216 | pair(nr_migrated, nr_failed), None, None) |
| 217 | 217 | ||
| 218 | def compaction__mm_compaction_isolate_freepages(event_name, context, common_cpu, | 218 | def compaction__mm_compaction_isolate_freepages(event_name, context, common_cpu, |
| 219 | common_secs, common_nsecs, common_pid, common_comm, | 219 | common_secs, common_nsecs, common_pid, common_comm, |
| 220 | common_callchain, start_pfn, end_pfn, nr_scanned, nr_taken): | 220 | common_callchain, start_pfn, end_pfn, nr_scanned, nr_taken): |
| 221 | 221 | ||
| 222 | chead.increment_pending(common_pid, | 222 | chead.increment_pending(common_pid, |
| 223 | None, pair(nr_scanned, nr_taken), None) | 223 | None, pair(nr_scanned, nr_taken), None) |
| 224 | 224 | ||
| 225 | def compaction__mm_compaction_isolate_migratepages(event_name, context, common_cpu, | 225 | def compaction__mm_compaction_isolate_migratepages(event_name, context, common_cpu, |
| 226 | common_secs, common_nsecs, common_pid, common_comm, | 226 | common_secs, common_nsecs, common_pid, common_comm, |
| 227 | common_callchain, start_pfn, end_pfn, nr_scanned, nr_taken): | 227 | common_callchain, start_pfn, end_pfn, nr_scanned, nr_taken): |
| 228 | 228 | ||
| 229 | chead.increment_pending(common_pid, | 229 | chead.increment_pending(common_pid, |
| 230 | None, None, pair(nr_scanned, nr_taken)) | 230 | None, None, pair(nr_scanned, nr_taken)) |
diff --git a/tools/perf/scripts/python/event_analyzing_sample.py b/tools/perf/scripts/python/event_analyzing_sample.py
index 4e843b9864ec..aa1e2cfa26a6 100644
--- a/tools/perf/scripts/python/event_analyzing_sample.py
+++ b/tools/perf/scripts/python/event_analyzing_sample.py
| @@ -15,6 +15,8 @@ | |||
| 15 | # for a x86 HW PMU event: PEBS with load latency data. | 15 | # for a x86 HW PMU event: PEBS with load latency data. |
| 16 | # | 16 | # |
| 17 | 17 | ||
| 18 | from __future__ import print_function | ||
| 19 | |||
| 18 | import os | 20 | import os |
| 19 | import sys | 21 | import sys |
| 20 | import math | 22 | import math |
| @@ -37,7 +39,7 @@ con = sqlite3.connect("/dev/shm/perf.db") | |||
| 37 | con.isolation_level = None | 39 | con.isolation_level = None |
| 38 | 40 | ||
| 39 | def trace_begin(): | 41 | def trace_begin(): |
| 40 | print "In trace_begin:\n" | 42 | print("In trace_begin:\n") |
| 41 | 43 | ||
| 42 | # | 44 | # |
| 43 | # Will create several tables at the start, pebs_ll is for PEBS data with | 45 | # Will create several tables at the start, pebs_ll is for PEBS data with |
| @@ -76,12 +78,12 @@ def process_event(param_dict): | |||
| 76 | name = param_dict["ev_name"] | 78 | name = param_dict["ev_name"] |
| 77 | 79 | ||
| 78 | # Symbol and dso info are not always resolved | 80 | # Symbol and dso info are not always resolved |
| 79 | if (param_dict.has_key("dso")): | 81 | if ("dso" in param_dict): |
| 80 | dso = param_dict["dso"] | 82 | dso = param_dict["dso"] |
| 81 | else: | 83 | else: |
| 82 | dso = "Unknown_dso" | 84 | dso = "Unknown_dso" |
| 83 | 85 | ||
| 84 | if (param_dict.has_key("symbol")): | 86 | if ("symbol" in param_dict): |
| 85 | symbol = param_dict["symbol"] | 87 | symbol = param_dict["symbol"] |
| 86 | else: | 88 | else: |
| 87 | symbol = "Unknown_symbol" | 89 | symbol = "Unknown_symbol" |
| @@ -102,7 +104,7 @@ def insert_db(event): | |||
| 102 | event.ip, event.status, event.dse, event.dla, event.lat)) | 104 | event.ip, event.status, event.dse, event.dla, event.lat)) |
| 103 | 105 | ||
| 104 | def trace_end(): | 106 | def trace_end(): |
| 105 | print "In trace_end:\n" | 107 | print("In trace_end:\n") |
| 106 | # We show the basic info for the 2 type of event classes | 108 | # We show the basic info for the 2 type of event classes |
| 107 | show_general_events() | 109 | show_general_events() |
| 108 | show_pebs_ll() | 110 | show_pebs_ll() |
| @@ -123,29 +125,29 @@ def show_general_events(): | |||
| 123 | # Check the total record number in the table | 125 | # Check the total record number in the table |
| 124 | count = con.execute("select count(*) from gen_events") | 126 | count = con.execute("select count(*) from gen_events") |
| 125 | for t in count: | 127 | for t in count: |
| 126 | print "There is %d records in gen_events table" % t[0] | 128 | print("There is %d records in gen_events table" % t[0]) |
| 127 | if t[0] == 0: | 129 | if t[0] == 0: |
| 128 | return | 130 | return |
| 129 | 131 | ||
| 130 | print "Statistics about the general events grouped by thread/symbol/dso: \n" | 132 | print("Statistics about the general events grouped by thread/symbol/dso: \n") |
| 131 | 133 | ||
| 132 | # Group by thread | 134 | # Group by thread |
| 133 | commq = con.execute("select comm, count(comm) from gen_events group by comm order by -count(comm)") | 135 | commq = con.execute("select comm, count(comm) from gen_events group by comm order by -count(comm)") |
| 134 | print "\n%16s %8s %16s\n%s" % ("comm", "number", "histogram", "="*42) | 136 | print("\n%16s %8s %16s\n%s" % ("comm", "number", "histogram", "="*42)) |
| 135 | for row in commq: | 137 | for row in commq: |
| 136 | print "%16s %8d %s" % (row[0], row[1], num2sym(row[1])) | 138 | print("%16s %8d %s" % (row[0], row[1], num2sym(row[1]))) |
| 137 | 139 | ||
| 138 | # Group by symbol | 140 | # Group by symbol |
| 139 | print "\n%32s %8s %16s\n%s" % ("symbol", "number", "histogram", "="*58) | 141 | print("\n%32s %8s %16s\n%s" % ("symbol", "number", "histogram", "="*58)) |
| 140 | symbolq = con.execute("select symbol, count(symbol) from gen_events group by symbol order by -count(symbol)") | 142 | symbolq = con.execute("select symbol, count(symbol) from gen_events group by symbol order by -count(symbol)") |
| 141 | for row in symbolq: | 143 | for row in symbolq: |
| 142 | print "%32s %8d %s" % (row[0], row[1], num2sym(row[1])) | 144 | print("%32s %8d %s" % (row[0], row[1], num2sym(row[1]))) |
| 143 | 145 | ||
| 144 | # Group by dso | 146 | # Group by dso |
| 145 | print "\n%40s %8s %16s\n%s" % ("dso", "number", "histogram", "="*74) | 147 | print("\n%40s %8s %16s\n%s" % ("dso", "number", "histogram", "="*74)) |
| 146 | dsoq = con.execute("select dso, count(dso) from gen_events group by dso order by -count(dso)") | 148 | dsoq = con.execute("select dso, count(dso) from gen_events group by dso order by -count(dso)") |
| 147 | for row in dsoq: | 149 | for row in dsoq: |
| 148 | print "%40s %8d %s" % (row[0], row[1], num2sym(row[1])) | 150 | print("%40s %8d %s" % (row[0], row[1], num2sym(row[1]))) |
| 149 | 151 | ||
| 150 | # | 152 | # |
| 151 | # This function just shows the basic info, and we could do more with the | 153 | # This function just shows the basic info, and we could do more with the |
| @@ -156,35 +158,35 @@ def show_pebs_ll(): | |||
| 156 | 158 | ||
| 157 | count = con.execute("select count(*) from pebs_ll") | 159 | count = con.execute("select count(*) from pebs_ll") |
| 158 | for t in count: | 160 | for t in count: |
| 159 | print "There is %d records in pebs_ll table" % t[0] | 161 | print("There is %d records in pebs_ll table" % t[0]) |
| 160 | if t[0] == 0: | 162 | if t[0] == 0: |
| 161 | return | 163 | return |
| 162 | 164 | ||
| 163 | print "Statistics about the PEBS Load Latency events grouped by thread/symbol/dse/latency: \n" | 165 | print("Statistics about the PEBS Load Latency events grouped by thread/symbol/dse/latency: \n") |
| 164 | 166 | ||
| 165 | # Group by thread | 167 | # Group by thread |
| 166 | commq = con.execute("select comm, count(comm) from pebs_ll group by comm order by -count(comm)") | 168 | commq = con.execute("select comm, count(comm) from pebs_ll group by comm order by -count(comm)") |
| 167 | print "\n%16s %8s %16s\n%s" % ("comm", "number", "histogram", "="*42) | 169 | print("\n%16s %8s %16s\n%s" % ("comm", "number", "histogram", "="*42)) |
| 168 | for row in commq: | 170 | for row in commq: |
| 169 | print "%16s %8d %s" % (row[0], row[1], num2sym(row[1])) | 171 | print("%16s %8d %s" % (row[0], row[1], num2sym(row[1]))) |
| 170 | 172 | ||
| 171 | # Group by symbol | 173 | # Group by symbol |
| 172 | print "\n%32s %8s %16s\n%s" % ("symbol", "number", "histogram", "="*58) | 174 | print("\n%32s %8s %16s\n%s" % ("symbol", "number", "histogram", "="*58)) |
| 173 | symbolq = con.execute("select symbol, count(symbol) from pebs_ll group by symbol order by -count(symbol)") | 175 | symbolq = con.execute("select symbol, count(symbol) from pebs_ll group by symbol order by -count(symbol)") |
| 174 | for row in symbolq: | 176 | for row in symbolq: |
| 175 | print "%32s %8d %s" % (row[0], row[1], num2sym(row[1])) | 177 | print("%32s %8d %s" % (row[0], row[1], num2sym(row[1]))) |
| 176 | 178 | ||
| 177 | # Group by dse | 179 | # Group by dse |
| 178 | dseq = con.execute("select dse, count(dse) from pebs_ll group by dse order by -count(dse)") | 180 | dseq = con.execute("select dse, count(dse) from pebs_ll group by dse order by -count(dse)") |
| 179 | print "\n%32s %8s %16s\n%s" % ("dse", "number", "histogram", "="*58) | 181 | print("\n%32s %8s %16s\n%s" % ("dse", "number", "histogram", "="*58)) |
| 180 | for row in dseq: | 182 | for row in dseq: |
| 181 | print "%32s %8d %s" % (row[0], row[1], num2sym(row[1])) | 183 | print("%32s %8d %s" % (row[0], row[1], num2sym(row[1]))) |
| 182 | 184 | ||
| 183 | # Group by latency | 185 | # Group by latency |
| 184 | latq = con.execute("select lat, count(lat) from pebs_ll group by lat order by lat") | 186 | latq = con.execute("select lat, count(lat) from pebs_ll group by lat order by lat") |
| 185 | print "\n%32s %8s %16s\n%s" % ("latency", "number", "histogram", "="*58) | 187 | print("\n%32s %8s %16s\n%s" % ("latency", "number", "histogram", "="*58)) |
| 186 | for row in latq: | 188 | for row in latq: |
| 187 | print "%32s %8d %s" % (row[0], row[1], num2sym(row[1])) | 189 | print("%32s %8d %s" % (row[0], row[1], num2sym(row[1]))) |
| 188 | 190 | ||
| 189 | def trace_unhandled(event_name, context, event_fields_dict): | 191 | def trace_unhandled(event_name, context, event_fields_dict): |
| 190 | print ' '.join(['%s=%s'%(k,str(v))for k,v in sorted(event_fields_dict.items())]) | 192 | print (' '.join(['%s=%s'%(k,str(v))for k,v in sorted(event_fields_dict.items())])) |
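
Besides the print() conversion, this file drops `dict.has_key()`, which no longer exists in Python 3; the `in` operator behaves the same in both versions. A short sketch with made-up sample fields:

```python
# has_key() was removed in Python 3; "in" (or dict.get) works in both.
param_dict = {"ev_name": "cycles", "symbol": "do_sys_open"}  # hypothetical event fields

# Python 2 only:   if param_dict.has_key("dso"): ...
# Python 2 and 3:
dso = param_dict["dso"] if "dso" in param_dict else "Unknown_dso"
symbol = param_dict.get("symbol", "Unknown_symbol")  # equivalent shortcut
print(dso, symbol)
```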
diff --git a/tools/perf/scripts/python/export-to-postgresql.py b/tools/perf/scripts/python/export-to-postgresql.py
index 30130213da7e..390a351d15ea 100644
--- a/tools/perf/scripts/python/export-to-postgresql.py
+++ b/tools/perf/scripts/python/export-to-postgresql.py
| @@ -394,7 +394,8 @@ if perf_db_export_calls: | |||
| 394 | 'call_id bigint,' | 394 | 'call_id bigint,' |
| 395 | 'return_id bigint,' | 395 | 'return_id bigint,' |
| 396 | 'parent_call_path_id bigint,' | 396 | 'parent_call_path_id bigint,' |
| 397 | 'flags integer)') | 397 | 'flags integer,' |
| 398 | 'parent_id bigint)') | ||
| 398 | 399 | ||
| 399 | do_query(query, 'CREATE VIEW machines_view AS ' | 400 | do_query(query, 'CREATE VIEW machines_view AS ' |
| 400 | 'SELECT ' | 401 | 'SELECT ' |
| @@ -478,8 +479,9 @@ if perf_db_export_calls: | |||
| 478 | 'branch_count,' | 479 | 'branch_count,' |
| 479 | 'call_id,' | 480 | 'call_id,' |
| 480 | 'return_id,' | 481 | 'return_id,' |
| 481 | 'CASE WHEN flags=0 THEN \'\' WHEN flags=1 THEN \'no call\' WHEN flags=2 THEN \'no return\' WHEN flags=3 THEN \'no call/return\' WHEN flags=6 THEN \'jump\' ELSE flags END AS flags,' | 482 | 'CASE WHEN flags=0 THEN \'\' WHEN flags=1 THEN \'no call\' WHEN flags=2 THEN \'no return\' WHEN flags=3 THEN \'no call/return\' WHEN flags=6 THEN \'jump\' ELSE CAST ( flags AS VARCHAR(6) ) END AS flags,' |
| 482 | 'parent_call_path_id' | 483 | 'parent_call_path_id,' |
| 484 | 'calls.parent_id' | ||
| 483 | ' FROM calls INNER JOIN call_paths ON call_paths.id = call_path_id') | 485 | ' FROM calls INNER JOIN call_paths ON call_paths.id = call_path_id') |
| 484 | 486 | ||
| 485 | do_query(query, 'CREATE VIEW samples_view AS ' | 487 | do_query(query, 'CREATE VIEW samples_view AS ' |
| @@ -575,6 +577,7 @@ def trace_begin(): | |||
| 575 | sample_table(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) | 577 | sample_table(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) |
| 576 | if perf_db_export_calls or perf_db_export_callchains: | 578 | if perf_db_export_calls or perf_db_export_callchains: |
| 577 | call_path_table(0, 0, 0, 0) | 579 | call_path_table(0, 0, 0, 0) |
| 580 | call_return_table(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) | ||
| 578 | 581 | ||
| 579 | unhandled_count = 0 | 582 | unhandled_count = 0 |
| 580 | 583 | ||
| @@ -657,6 +660,7 @@ def trace_end(): | |||
| 657 | 'ADD CONSTRAINT returnfk FOREIGN KEY (return_id) REFERENCES samples (id),' | 660 | 'ADD CONSTRAINT returnfk FOREIGN KEY (return_id) REFERENCES samples (id),' |
| 658 | 'ADD CONSTRAINT parent_call_pathfk FOREIGN KEY (parent_call_path_id) REFERENCES call_paths (id)') | 661 | 'ADD CONSTRAINT parent_call_pathfk FOREIGN KEY (parent_call_path_id) REFERENCES call_paths (id)') |
| 659 | do_query(query, 'CREATE INDEX pcpid_idx ON calls (parent_call_path_id)') | 662 | do_query(query, 'CREATE INDEX pcpid_idx ON calls (parent_call_path_id)') |
| 663 | do_query(query, 'CREATE INDEX pid_idx ON calls (parent_id)') | ||
| 660 | 664 | ||
| 661 | if (unhandled_count): | 665 | if (unhandled_count): |
| 662 | print datetime.datetime.today(), "Warning: ", unhandled_count, " unhandled events" | 666 | print datetime.datetime.today(), "Warning: ", unhandled_count, " unhandled events" |
| @@ -728,7 +732,7 @@ def call_path_table(cp_id, parent_id, symbol_id, ip, *x): | |||
| 728 | value = struct.pack(fmt, 4, 8, cp_id, 8, parent_id, 8, symbol_id, 8, ip) | 732 | value = struct.pack(fmt, 4, 8, cp_id, 8, parent_id, 8, symbol_id, 8, ip) |
| 729 | call_path_file.write(value) | 733 | call_path_file.write(value) |
| 730 | 734 | ||
| 731 | def call_return_table(cr_id, thread_id, comm_id, call_path_id, call_time, return_time, branch_count, call_id, return_id, parent_call_path_id, flags, *x): | 735 | def call_return_table(cr_id, thread_id, comm_id, call_path_id, call_time, return_time, branch_count, call_id, return_id, parent_call_path_id, flags, parent_id, *x): |
| 732 | fmt = "!hiqiqiqiqiqiqiqiqiqiqii" | 736 | fmt = "!hiqiqiqiqiqiqiqiqiqiqiiiq" |
| 733 | value = struct.pack(fmt, 11, 8, cr_id, 8, thread_id, 8, comm_id, 8, call_path_id, 8, call_time, 8, return_time, 8, branch_count, 8, call_id, 8, return_id, 8, parent_call_path_id, 4, flags) | 737 | value = struct.pack(fmt, 12, 8, cr_id, 8, thread_id, 8, comm_id, 8, call_path_id, 8, call_time, 8, return_time, 8, branch_count, 8, call_id, 8, return_id, 8, parent_call_path_id, 4, flags, 8, parent_id) |
| 734 | call_file.write(value) | 738 | call_file.write(value) |
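
The format-string change above grows the exported call_return record by one column. The exporter appears to write PostgreSQL COPY BINARY tuples: network byte order ('!'), a 16-bit column count ('h'), then a 32-bit length followed by the value for each column ('q' for a bigint, 'i' for an integer). A hedged sketch of how the old and new formats relate, with invented field values:

```python
import struct

# 11 columns: ten bigints plus the 4-byte flags field ...
old_fmt = "!h" + "iq" * 10 + "ii"
# ... 12 columns: the same, followed by the new parent_id bigint.
new_fmt = "!h" + "iq" * 10 + "ii" + "iq"

assert old_fmt == "!hiqiqiqiqiqiqiqiqiqiqii"
assert new_fmt == "!hiqiqiqiqiqiqiqiqiqiqiiiq"

# Pack one record with placeholder values: column count 12, then
# (length, value) pairs -- lengths are 8 for bigints and 4 for flags.
values = (12,
          8, 1, 8, 2, 8, 3, 8, 4, 8, 1000, 8, 2000, 8, 5, 8, 6, 8, 7, 8, 0,
          4, 0,      # flags
          8, 9)      # parent_id
record = struct.pack(new_fmt, *values)
print(len(record), "bytes per call_return row")
```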
diff --git a/tools/perf/scripts/python/export-to-sqlite.py b/tools/perf/scripts/python/export-to-sqlite.py
index ed237f2ed03f..eb63e6c7107f 100644
--- a/tools/perf/scripts/python/export-to-sqlite.py
+++ b/tools/perf/scripts/python/export-to-sqlite.py
| @@ -222,7 +222,8 @@ if perf_db_export_calls: | |||
| 222 | 'call_id bigint,' | 222 | 'call_id bigint,' |
| 223 | 'return_id bigint,' | 223 | 'return_id bigint,' |
| 224 | 'parent_call_path_id bigint,' | 224 | 'parent_call_path_id bigint,' |
| 225 | 'flags integer)') | 225 | 'flags integer,' |
| 226 | 'parent_id bigint)') | ||
| 226 | 227 | ||
| 227 | # printf was added to sqlite in version 3.8.3 | 228 | # printf was added to sqlite in version 3.8.3 |
| 228 | sqlite_has_printf = False | 229 | sqlite_has_printf = False |
| @@ -321,7 +322,8 @@ if perf_db_export_calls: | |||
| 321 | 'call_id,' | 322 | 'call_id,' |
| 322 | 'return_id,' | 323 | 'return_id,' |
| 323 | 'CASE WHEN flags=0 THEN \'\' WHEN flags=1 THEN \'no call\' WHEN flags=2 THEN \'no return\' WHEN flags=3 THEN \'no call/return\' WHEN flags=6 THEN \'jump\' ELSE flags END AS flags,' | 324 | 'CASE WHEN flags=0 THEN \'\' WHEN flags=1 THEN \'no call\' WHEN flags=2 THEN \'no return\' WHEN flags=3 THEN \'no call/return\' WHEN flags=6 THEN \'jump\' ELSE flags END AS flags,' |
| 324 | 'parent_call_path_id' | 325 | 'parent_call_path_id,' |
| 326 | 'parent_id' | ||
| 325 | ' FROM calls INNER JOIN call_paths ON call_paths.id = call_path_id') | 327 | ' FROM calls INNER JOIN call_paths ON call_paths.id = call_path_id') |
| 326 | 328 | ||
| 327 | do_query(query, 'CREATE VIEW samples_view AS ' | 329 | do_query(query, 'CREATE VIEW samples_view AS ' |
| @@ -373,7 +375,7 @@ if perf_db_export_calls or perf_db_export_callchains: | |||
| 373 | call_path_query.prepare("INSERT INTO call_paths VALUES (?, ?, ?, ?)") | 375 | call_path_query.prepare("INSERT INTO call_paths VALUES (?, ?, ?, ?)") |
| 374 | if perf_db_export_calls: | 376 | if perf_db_export_calls: |
| 375 | call_query = QSqlQuery(db) | 377 | call_query = QSqlQuery(db) |
| 376 | call_query.prepare("INSERT INTO calls VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)") | 378 | call_query.prepare("INSERT INTO calls VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)") |
| 377 | 379 | ||
| 378 | def trace_begin(): | 380 | def trace_begin(): |
| 379 | print datetime.datetime.today(), "Writing records..." | 381 | print datetime.datetime.today(), "Writing records..." |
| @@ -388,6 +390,7 @@ def trace_begin(): | |||
| 388 | sample_table(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) | 390 | sample_table(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) |
| 389 | if perf_db_export_calls or perf_db_export_callchains: | 391 | if perf_db_export_calls or perf_db_export_callchains: |
| 390 | call_path_table(0, 0, 0, 0) | 392 | call_path_table(0, 0, 0, 0) |
| 393 | call_return_table(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) | ||
| 391 | 394 | ||
| 392 | unhandled_count = 0 | 395 | unhandled_count = 0 |
| 393 | 396 | ||
| @@ -397,6 +400,7 @@ def trace_end(): | |||
| 397 | print datetime.datetime.today(), "Adding indexes" | 400 | print datetime.datetime.today(), "Adding indexes" |
| 398 | if perf_db_export_calls: | 401 | if perf_db_export_calls: |
| 399 | do_query(query, 'CREATE INDEX pcpid_idx ON calls (parent_call_path_id)') | 402 | do_query(query, 'CREATE INDEX pcpid_idx ON calls (parent_call_path_id)') |
| 403 | do_query(query, 'CREATE INDEX pid_idx ON calls (parent_id)') | ||
| 400 | 404 | ||
| 401 | if (unhandled_count): | 405 | if (unhandled_count): |
| 402 | print datetime.datetime.today(), "Warning: ", unhandled_count, " unhandled events" | 406 | print datetime.datetime.today(), "Warning: ", unhandled_count, " unhandled events" |
| @@ -452,4 +456,4 @@ def call_path_table(*x): | |||
| 452 | bind_exec(call_path_query, 4, x) | 456 | bind_exec(call_path_query, 4, x) |
| 453 | 457 | ||
| 454 | def call_return_table(*x): | 458 | def call_return_table(*x): |
| 455 | bind_exec(call_query, 11, x) | 459 | bind_exec(call_query, 12, x) |
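
On the SQLite side the same parent_id column and pid_idx index are added, which is what lets the new Call Tree report expand a call's children with one indexed lookup. A small sketch against an in-memory database; the schema is trimmed to the relevant columns and the rows are invented:

```python
import sqlite3

con = sqlite3.connect(":memory:")
con.execute("CREATE TABLE calls (id integer, call_path_id bigint, call_time bigint,"
            " return_time bigint, branch_count bigint, parent_id bigint)")
con.execute("CREATE INDEX pid_idx ON calls (parent_id)")  # same index the script creates

con.executemany("INSERT INTO calls VALUES (?, ?, ?, ?, ?, ?)",
                [(1, 10, 100, 900, 50, 0),   # top-level call
                 (2, 11, 120, 400, 20, 1),   # called from call 1
                 (3, 12, 410, 800, 25, 1)])  # called from call 1

# Children of call 1, in call order -- the shape of query the viewer runs.
for cid, elapsed in con.execute("SELECT id, return_time - call_time FROM calls"
                                " WHERE parent_id = 1 ORDER BY call_time, id"):
    print("call %d took %d ns" % (cid, elapsed))
```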
diff --git a/tools/perf/scripts/python/exported-sql-viewer.py b/tools/perf/scripts/python/exported-sql-viewer.py
index 09ce73b07d35..afec9479ca7f 100755
--- a/tools/perf/scripts/python/exported-sql-viewer.py
+++ b/tools/perf/scripts/python/exported-sql-viewer.py
| @@ -167,9 +167,10 @@ class Thread(QThread): | |||
| 167 | 167 | ||
| 168 | class TreeModel(QAbstractItemModel): | 168 | class TreeModel(QAbstractItemModel): |
| 169 | 169 | ||
| 170 | def __init__(self, root, parent=None): | 170 | def __init__(self, glb, parent=None): |
| 171 | super(TreeModel, self).__init__(parent) | 171 | super(TreeModel, self).__init__(parent) |
| 172 | self.root = root | 172 | self.glb = glb |
| 173 | self.root = self.GetRoot() | ||
| 173 | self.last_row_read = 0 | 174 | self.last_row_read = 0 |
| 174 | 175 | ||
| 175 | def Item(self, parent): | 176 | def Item(self, parent): |
| @@ -557,24 +558,12 @@ class CallGraphRootItem(CallGraphLevelItemBase): | |||
| 557 | self.child_items.append(child_item) | 558 | self.child_items.append(child_item) |
| 558 | self.child_count += 1 | 559 | self.child_count += 1 |
| 559 | 560 | ||
| 560 | # Context-sensitive call graph data model | 561 | # Context-sensitive call graph data model base |
| 561 | 562 | ||
| 562 | class CallGraphModel(TreeModel): | 563 | class CallGraphModelBase(TreeModel): |
| 563 | 564 | ||
| 564 | def __init__(self, glb, parent=None): | 565 | def __init__(self, glb, parent=None): |
| 565 | super(CallGraphModel, self).__init__(CallGraphRootItem(glb), parent) | 566 | super(CallGraphModelBase, self).__init__(glb, parent) |
| 566 | self.glb = glb | ||
| 567 | |||
| 568 | def columnCount(self, parent=None): | ||
| 569 | return 7 | ||
| 570 | |||
| 571 | def columnHeader(self, column): | ||
| 572 | headers = ["Call Path", "Object", "Count ", "Time (ns) ", "Time (%) ", "Branch Count ", "Branch Count (%) "] | ||
| 573 | return headers[column] | ||
| 574 | |||
| 575 | def columnAlignment(self, column): | ||
| 576 | alignment = [ Qt.AlignLeft, Qt.AlignLeft, Qt.AlignRight, Qt.AlignRight, Qt.AlignRight, Qt.AlignRight, Qt.AlignRight ] | ||
| 577 | return alignment[column] | ||
| 578 | 567 | ||
| 579 | def FindSelect(self, value, pattern, query): | 568 | def FindSelect(self, value, pattern, query): |
| 580 | if pattern: | 569 | if pattern: |
| @@ -594,34 +583,7 @@ class CallGraphModel(TreeModel): | |||
| 594 | match = " GLOB '" + str(value) + "'" | 583 | match = " GLOB '" + str(value) + "'" |
| 595 | else: | 584 | else: |
| 596 | match = " = '" + str(value) + "'" | 585 | match = " = '" + str(value) + "'" |
| 597 | QueryExec(query, "SELECT call_path_id, comm_id, thread_id" | 586 | self.DoFindSelect(query, match) |
| 598 | " FROM calls" | ||
| 599 | " INNER JOIN call_paths ON calls.call_path_id = call_paths.id" | ||
| 600 | " INNER JOIN symbols ON call_paths.symbol_id = symbols.id" | ||
| 601 | " WHERE symbols.name" + match + | ||
| 602 | " GROUP BY comm_id, thread_id, call_path_id" | ||
| 603 | " ORDER BY comm_id, thread_id, call_path_id") | ||
| 604 | |||
| 605 | def FindPath(self, query): | ||
| 606 | # Turn the query result into a list of ids that the tree view can walk | ||
| 607 | # to open the tree at the right place. | ||
| 608 | ids = [] | ||
| 609 | parent_id = query.value(0) | ||
| 610 | while parent_id: | ||
| 611 | ids.insert(0, parent_id) | ||
| 612 | q2 = QSqlQuery(self.glb.db) | ||
| 613 | QueryExec(q2, "SELECT parent_id" | ||
| 614 | " FROM call_paths" | ||
| 615 | " WHERE id = " + str(parent_id)) | ||
| 616 | if not q2.next(): | ||
| 617 | break | ||
| 618 | parent_id = q2.value(0) | ||
| 619 | # The call path root is not used | ||
| 620 | if ids[0] == 1: | ||
| 621 | del ids[0] | ||
| 622 | ids.insert(0, query.value(2)) | ||
| 623 | ids.insert(0, query.value(1)) | ||
| 624 | return ids | ||
| 625 | 587 | ||
| 626 | def Found(self, query, found): | 588 | def Found(self, query, found): |
| 627 | if found: | 589 | if found: |
| @@ -675,6 +637,201 @@ class CallGraphModel(TreeModel): | |||
| 675 | def FindDone(self, thread, callback, ids): | 637 | def FindDone(self, thread, callback, ids): |
| 676 | callback(ids) | 638 | callback(ids) |
| 677 | 639 | ||
| 640 | # Context-sensitive call graph data model | ||
| 641 | |||
| 642 | class CallGraphModel(CallGraphModelBase): | ||
| 643 | |||
| 644 | def __init__(self, glb, parent=None): | ||
| 645 | super(CallGraphModel, self).__init__(glb, parent) | ||
| 646 | |||
| 647 | def GetRoot(self): | ||
| 648 | return CallGraphRootItem(self.glb) | ||
| 649 | |||
| 650 | def columnCount(self, parent=None): | ||
| 651 | return 7 | ||
| 652 | |||
| 653 | def columnHeader(self, column): | ||
| 654 | headers = ["Call Path", "Object", "Count ", "Time (ns) ", "Time (%) ", "Branch Count ", "Branch Count (%) "] | ||
| 655 | return headers[column] | ||
| 656 | |||
| 657 | def columnAlignment(self, column): | ||
| 658 | alignment = [ Qt.AlignLeft, Qt.AlignLeft, Qt.AlignRight, Qt.AlignRight, Qt.AlignRight, Qt.AlignRight, Qt.AlignRight ] | ||
| 659 | return alignment[column] | ||
| 660 | |||
| 661 | def DoFindSelect(self, query, match): | ||
| 662 | QueryExec(query, "SELECT call_path_id, comm_id, thread_id" | ||
| 663 | " FROM calls" | ||
| 664 | " INNER JOIN call_paths ON calls.call_path_id = call_paths.id" | ||
| 665 | " INNER JOIN symbols ON call_paths.symbol_id = symbols.id" | ||
| 666 | " WHERE symbols.name" + match + | ||
| 667 | " GROUP BY comm_id, thread_id, call_path_id" | ||
| 668 | " ORDER BY comm_id, thread_id, call_path_id") | ||
| 669 | |||
| 670 | def FindPath(self, query): | ||
| 671 | # Turn the query result into a list of ids that the tree view can walk | ||
| 672 | # to open the tree at the right place. | ||
| 673 | ids = [] | ||
| 674 | parent_id = query.value(0) | ||
| 675 | while parent_id: | ||
| 676 | ids.insert(0, parent_id) | ||
| 677 | q2 = QSqlQuery(self.glb.db) | ||
| 678 | QueryExec(q2, "SELECT parent_id" | ||
| 679 | " FROM call_paths" | ||
| 680 | " WHERE id = " + str(parent_id)) | ||
| 681 | if not q2.next(): | ||
| 682 | break | ||
| 683 | parent_id = q2.value(0) | ||
| 684 | # The call path root is not used | ||
| 685 | if ids[0] == 1: | ||
| 686 | del ids[0] | ||
| 687 | ids.insert(0, query.value(2)) | ||
| 688 | ids.insert(0, query.value(1)) | ||
| 689 | return ids | ||
| 690 | |||
| 691 | # Call tree data model level 2+ item base | ||
| 692 | |||
| 693 | class CallTreeLevelTwoPlusItemBase(CallGraphLevelItemBase): | ||
| 694 | |||
| 695 | def __init__(self, glb, row, comm_id, thread_id, calls_id, time, branch_count, parent_item): | ||
| 696 | super(CallTreeLevelTwoPlusItemBase, self).__init__(glb, row, parent_item) | ||
| 697 | self.comm_id = comm_id | ||
| 698 | self.thread_id = thread_id | ||
| 699 | self.calls_id = calls_id | ||
| 700 | self.branch_count = branch_count | ||
| 701 | self.time = time | ||
| 702 | |||
| 703 | def Select(self): | ||
| 704 | self.query_done = True; | ||
| 705 | if self.calls_id == 0: | ||
| 706 | comm_thread = " AND comm_id = " + str(self.comm_id) + " AND thread_id = " + str(self.thread_id) | ||
| 707 | else: | ||
| 708 | comm_thread = "" | ||
| 709 | query = QSqlQuery(self.glb.db) | ||
| 710 | QueryExec(query, "SELECT calls.id, name, short_name, call_time, return_time - call_time, branch_count" | ||
| 711 | " FROM calls" | ||
| 712 | " INNER JOIN call_paths ON calls.call_path_id = call_paths.id" | ||
| 713 | " INNER JOIN symbols ON call_paths.symbol_id = symbols.id" | ||
| 714 | " INNER JOIN dsos ON symbols.dso_id = dsos.id" | ||
| 715 | " WHERE calls.parent_id = " + str(self.calls_id) + comm_thread + | ||
| 716 | " ORDER BY call_time, calls.id") | ||
| 717 | while query.next(): | ||
| 718 | child_item = CallTreeLevelThreeItem(self.glb, self.child_count, self.comm_id, self.thread_id, query.value(0), query.value(1), query.value(2), query.value(3), int(query.value(4)), int(query.value(5)), self) | ||
| 719 | self.child_items.append(child_item) | ||
| 720 | self.child_count += 1 | ||
| 721 | |||
| 722 | # Call tree data model level three item | ||
| 723 | |||
| 724 | class CallTreeLevelThreeItem(CallTreeLevelTwoPlusItemBase): | ||
| 725 | |||
| 726 | def __init__(self, glb, row, comm_id, thread_id, calls_id, name, dso, count, time, branch_count, parent_item): | ||
| 727 | super(CallTreeLevelThreeItem, self).__init__(glb, row, comm_id, thread_id, calls_id, time, branch_count, parent_item) | ||
| 728 | dso = dsoname(dso) | ||
| 729 | self.data = [ name, dso, str(count), str(time), PercentToOneDP(time, parent_item.time), str(branch_count), PercentToOneDP(branch_count, parent_item.branch_count) ] | ||
| 730 | self.dbid = calls_id | ||
| 731 | |||
| 732 | # Call tree data model level two item | ||
| 733 | |||
| 734 | class CallTreeLevelTwoItem(CallTreeLevelTwoPlusItemBase): | ||
| 735 | |||
| 736 | def __init__(self, glb, row, comm_id, thread_id, pid, tid, parent_item): | ||
| 737 | super(CallTreeLevelTwoItem, self).__init__(glb, row, comm_id, thread_id, 0, 0, 0, parent_item) | ||
| 738 | self.data = [str(pid) + ":" + str(tid), "", "", "", "", "", ""] | ||
| 739 | self.dbid = thread_id | ||
| 740 | |||
| 741 | def Select(self): | ||
| 742 | super(CallTreeLevelTwoItem, self).Select() | ||
| 743 | for child_item in self.child_items: | ||
| 744 | self.time += child_item.time | ||
| 745 | self.branch_count += child_item.branch_count | ||
| 746 | for child_item in self.child_items: | ||
| 747 | child_item.data[4] = PercentToOneDP(child_item.time, self.time) | ||
| 748 | child_item.data[6] = PercentToOneDP(child_item.branch_count, self.branch_count) | ||
| 749 | |||
| 750 | # Call tree data model level one item | ||
| 751 | |||
| 752 | class CallTreeLevelOneItem(CallGraphLevelItemBase): | ||
| 753 | |||
| 754 | def __init__(self, glb, row, comm_id, comm, parent_item): | ||
| 755 | super(CallTreeLevelOneItem, self).__init__(glb, row, parent_item) | ||
| 756 | self.data = [comm, "", "", "", "", "", ""] | ||
| 757 | self.dbid = comm_id | ||
| 758 | |||
| 759 | def Select(self): | ||
| 760 | self.query_done = True; | ||
| 761 | query = QSqlQuery(self.glb.db) | ||
| 762 | QueryExec(query, "SELECT thread_id, pid, tid" | ||
| 763 | " FROM comm_threads" | ||
| 764 | " INNER JOIN threads ON thread_id = threads.id" | ||
| 765 | " WHERE comm_id = " + str(self.dbid)) | ||
| 766 | while query.next(): | ||
| 767 | child_item = CallTreeLevelTwoItem(self.glb, self.child_count, self.dbid, query.value(0), query.value(1), query.value(2), self) | ||
| 768 | self.child_items.append(child_item) | ||
| 769 | self.child_count += 1 | ||
| 770 | |||
| 771 | # Call tree data model root item | ||
| 772 | |||
| 773 | class CallTreeRootItem(CallGraphLevelItemBase): | ||
| 774 | |||
| 775 | def __init__(self, glb): | ||
| 776 | super(CallTreeRootItem, self).__init__(glb, 0, None) | ||
| 777 | self.dbid = 0 | ||
| 778 | self.query_done = True; | ||
| 779 | query = QSqlQuery(glb.db) | ||
| 780 | QueryExec(query, "SELECT id, comm FROM comms") | ||
| 781 | while query.next(): | ||
| 782 | if not query.value(0): | ||
| 783 | continue | ||
| 784 | child_item = CallTreeLevelOneItem(glb, self.child_count, query.value(0), query.value(1), self) | ||
| 785 | self.child_items.append(child_item) | ||
| 786 | self.child_count += 1 | ||
| 787 | |||
| 788 | # Call Tree data model | ||
| 789 | |||
| 790 | class CallTreeModel(CallGraphModelBase): | ||
| 791 | |||
| 792 | def __init__(self, glb, parent=None): | ||
| 793 | super(CallTreeModel, self).__init__(glb, parent) | ||
| 794 | |||
| 795 | def GetRoot(self): | ||
| 796 | return CallTreeRootItem(self.glb) | ||
| 797 | |||
| 798 | def columnCount(self, parent=None): | ||
| 799 | return 7 | ||
| 800 | |||
| 801 | def columnHeader(self, column): | ||
| 802 | headers = ["Call Path", "Object", "Call Time", "Time (ns) ", "Time (%) ", "Branch Count ", "Branch Count (%) "] | ||
| 803 | return headers[column] | ||
| 804 | |||
| 805 | def columnAlignment(self, column): | ||
| 806 | alignment = [ Qt.AlignLeft, Qt.AlignLeft, Qt.AlignRight, Qt.AlignRight, Qt.AlignRight, Qt.AlignRight, Qt.AlignRight ] | ||
| 807 | return alignment[column] | ||
| 808 | |||
| 809 | def DoFindSelect(self, query, match): | ||
| 810 | QueryExec(query, "SELECT calls.id, comm_id, thread_id" | ||
| 811 | " FROM calls" | ||
| 812 | " INNER JOIN call_paths ON calls.call_path_id = call_paths.id" | ||
| 813 | " INNER JOIN symbols ON call_paths.symbol_id = symbols.id" | ||
| 814 | " WHERE symbols.name" + match + | ||
| 815 | " ORDER BY comm_id, thread_id, call_time, calls.id") | ||
| 816 | |||
| 817 | def FindPath(self, query): | ||
| 818 | # Turn the query result into a list of ids that the tree view can walk | ||
| 819 | # to open the tree at the right place. | ||
| 820 | ids = [] | ||
| 821 | parent_id = query.value(0) | ||
| 822 | while parent_id: | ||
| 823 | ids.insert(0, parent_id) | ||
| 824 | q2 = QSqlQuery(self.glb.db) | ||
| 825 | QueryExec(q2, "SELECT parent_id" | ||
| 826 | " FROM calls" | ||
| 827 | " WHERE id = " + str(parent_id)) | ||
| 828 | if not q2.next(): | ||
| 829 | break | ||
| 830 | parent_id = q2.value(0) | ||
| 831 | ids.insert(0, query.value(2)) | ||
| 832 | ids.insert(0, query.value(1)) | ||
| 833 | return ids | ||
| 834 | |||
| 678 | # Vertical widget layout | 835 | # Vertical widget layout |
| 679 | 836 | ||
| 680 | class VBox(): | 837 | class VBox(): |
| @@ -693,28 +850,16 @@ class VBox(): | |||
| 693 | def Widget(self): | 850 | def Widget(self): |
| 694 | return self.vbox | 851 | return self.vbox |
| 695 | 852 | ||
| 696 | # Context-sensitive call graph window | 853 | # Tree window base |
| 697 | |||
| 698 | class CallGraphWindow(QMdiSubWindow): | ||
| 699 | |||
| 700 | def __init__(self, glb, parent=None): | ||
| 701 | super(CallGraphWindow, self).__init__(parent) | ||
| 702 | |||
| 703 | self.model = LookupCreateModel("Context-Sensitive Call Graph", lambda x=glb: CallGraphModel(x)) | ||
| 704 | |||
| 705 | self.view = QTreeView() | ||
| 706 | self.view.setModel(self.model) | ||
| 707 | |||
| 708 | for c, w in ((0, 250), (1, 100), (2, 60), (3, 70), (4, 70), (5, 100)): | ||
| 709 | self.view.setColumnWidth(c, w) | ||
| 710 | |||
| 711 | self.find_bar = FindBar(self, self) | ||
| 712 | 854 | ||
| 713 | self.vbox = VBox(self.view, self.find_bar.Widget()) | 855 | class TreeWindowBase(QMdiSubWindow): |
| 714 | 856 | ||
| 715 | self.setWidget(self.vbox.Widget()) | 857 | def __init__(self, parent=None): |
| 858 | super(TreeWindowBase, self).__init__(parent) | ||
| 716 | 859 | ||
| 717 | AddSubWindow(glb.mainwindow.mdi_area, self, "Context-Sensitive Call Graph") | 860 | self.model = None |
| 861 | self.view = None | ||
| 862 | self.find_bar = None | ||
| 718 | 863 | ||
| 719 | def DisplayFound(self, ids): | 864 | def DisplayFound(self, ids): |
| 720 | if not len(ids): | 865 | if not len(ids): |
| @@ -747,6 +892,53 @@ class CallGraphWindow(QMdiSubWindow): | |||
| 747 | if not found: | 892 | if not found: |
| 748 | self.find_bar.NotFound() | 893 | self.find_bar.NotFound() |
| 749 | 894 | ||
| 895 | |||
| 896 | # Context-sensitive call graph window | ||
| 897 | |||
| 898 | class CallGraphWindow(TreeWindowBase): | ||
| 899 | |||
| 900 | def __init__(self, glb, parent=None): | ||
| 901 | super(CallGraphWindow, self).__init__(parent) | ||
| 902 | |||
| 903 | self.model = LookupCreateModel("Context-Sensitive Call Graph", lambda x=glb: CallGraphModel(x)) | ||
| 904 | |||
| 905 | self.view = QTreeView() | ||
| 906 | self.view.setModel(self.model) | ||
| 907 | |||
| 908 | for c, w in ((0, 250), (1, 100), (2, 60), (3, 70), (4, 70), (5, 100)): | ||
| 909 | self.view.setColumnWidth(c, w) | ||
| 910 | |||
| 911 | self.find_bar = FindBar(self, self) | ||
| 912 | |||
| 913 | self.vbox = VBox(self.view, self.find_bar.Widget()) | ||
| 914 | |||
| 915 | self.setWidget(self.vbox.Widget()) | ||
| 916 | |||
| 917 | AddSubWindow(glb.mainwindow.mdi_area, self, "Context-Sensitive Call Graph") | ||
| 918 | |||
| 919 | # Call tree window | ||
| 920 | |||
| 921 | class CallTreeWindow(TreeWindowBase): | ||
| 922 | |||
| 923 | def __init__(self, glb, parent=None): | ||
| 924 | super(CallTreeWindow, self).__init__(parent) | ||
| 925 | |||
| 926 | self.model = LookupCreateModel("Call Tree", lambda x=glb: CallTreeModel(x)) | ||
| 927 | |||
| 928 | self.view = QTreeView() | ||
| 929 | self.view.setModel(self.model) | ||
| 930 | |||
| 931 | for c, w in ((0, 230), (1, 100), (2, 100), (3, 70), (4, 70), (5, 100)): | ||
| 932 | self.view.setColumnWidth(c, w) | ||
| 933 | |||
| 934 | self.find_bar = FindBar(self, self) | ||
| 935 | |||
| 936 | self.vbox = VBox(self.view, self.find_bar.Widget()) | ||
| 937 | |||
| 938 | self.setWidget(self.vbox.Widget()) | ||
| 939 | |||
| 940 | AddSubWindow(glb.mainwindow.mdi_area, self, "Call Tree") | ||
| 941 | |||
| 750 | # Child data item finder | 942 | # Child data item finder |
| 751 | 943 | ||
| 752 | class ChildDataItemFinder(): | 944 | class ChildDataItemFinder(): |
| @@ -1327,8 +1519,7 @@ class BranchModel(TreeModel): | |||
| 1327 | progress = Signal(object) | 1519 | progress = Signal(object) |
| 1328 | 1520 | ||
| 1329 | def __init__(self, glb, event_id, where_clause, parent=None): | 1521 | def __init__(self, glb, event_id, where_clause, parent=None): |
| 1330 | super(BranchModel, self).__init__(BranchRootItem(), parent) | 1522 | super(BranchModel, self).__init__(glb, parent) |
| 1331 | self.glb = glb | ||
| 1332 | self.event_id = event_id | 1523 | self.event_id = event_id |
| 1333 | self.more = True | 1524 | self.more = True |
| 1334 | self.populated = 0 | 1525 | self.populated = 0 |
| @@ -1352,6 +1543,9 @@ class BranchModel(TreeModel): | |||
| 1352 | self.fetcher.done.connect(self.Update) | 1543 | self.fetcher.done.connect(self.Update) |
| 1353 | self.fetcher.Fetch(glb_chunk_sz) | 1544 | self.fetcher.Fetch(glb_chunk_sz) |
| 1354 | 1545 | ||
| 1546 | def GetRoot(self): | ||
| 1547 | return BranchRootItem() | ||
| 1548 | |||
| 1355 | def columnCount(self, parent=None): | 1549 | def columnCount(self, parent=None): |
| 1356 | return 8 | 1550 | return 8 |
| 1357 | 1551 | ||
| @@ -1863,10 +2057,10 @@ def GetEventList(db): | |||
| 1863 | 2057 | ||
| 1864 | # Is a table selectable | 2058 | # Is a table selectable |
| 1865 | 2059 | ||
| 1866 | def IsSelectable(db, table): | 2060 | def IsSelectable(db, table, sql = ""): |
| 1867 | query = QSqlQuery(db) | 2061 | query = QSqlQuery(db) |
| 1868 | try: | 2062 | try: |
| 1869 | QueryExec(query, "SELECT * FROM " + table + " LIMIT 1") | 2063 | QueryExec(query, "SELECT * FROM " + table + " " + sql + " LIMIT 1") |
| 1870 | except: | 2064 | except: |
| 1871 | return False | 2065 | return False |
| 1872 | return True | 2066 | return True |
| @@ -2275,9 +2469,10 @@ p.c2 { | |||
| 2275 | </style> | 2469 | </style> |
| 2276 | <p class=c1><a href=#reports>1. Reports</a></p> | 2470 | <p class=c1><a href=#reports>1. Reports</a></p> |
| 2277 | <p class=c2><a href=#callgraph>1.1 Context-Sensitive Call Graph</a></p> | 2471 | <p class=c2><a href=#callgraph>1.1 Context-Sensitive Call Graph</a></p> |
| 2278 | <p class=c2><a href=#allbranches>1.2 All branches</a></p> | 2472 | <p class=c2><a href=#calltree>1.2 Call Tree</a></p> |
| 2279 | <p class=c2><a href=#selectedbranches>1.3 Selected branches</a></p> | 2473 | <p class=c2><a href=#allbranches>1.3 All branches</a></p> |
| 2280 | <p class=c2><a href=#topcallsbyelapsedtime>1.4 Top calls by elapsed time</a></p> | 2474 | <p class=c2><a href=#selectedbranches>1.4 Selected branches</a></p> |
| 2475 | <p class=c2><a href=#topcallsbyelapsedtime>1.5 Top calls by elapsed time</a></p> | ||
| 2281 | <p class=c1><a href=#tables>2. Tables</a></p> | 2476 | <p class=c1><a href=#tables>2. Tables</a></p> |
| 2282 | <h1 id=reports>1. Reports</h1> | 2477 | <h1 id=reports>1. Reports</h1> |
| 2283 | <h2 id=callgraph>1.1 Context-Sensitive Call Graph</h2> | 2478 | <h2 id=callgraph>1.1 Context-Sensitive Call Graph</h2> |
| @@ -2313,7 +2508,10 @@ v- ls | |||
| 2313 | <h3>Find</h3> | 2508 | <h3>Find</h3> |
| 2314 | Ctrl-F displays a Find bar which finds function names by either an exact match or a pattern match. | 2509 | Ctrl-F displays a Find bar which finds function names by either an exact match or a pattern match. |
| 2315 | The pattern matching symbols are ? for any character and * for zero or more characters. | 2510 | The pattern matching symbols are ? for any character and * for zero or more characters. |
| 2316 | <h2 id=allbranches>1.2 All branches</h2> | 2511 | <h2 id=calltree>1.2 Call Tree</h2> |
| 2512 | The Call Tree report is very similar to the Context-Sensitive Call Graph, but the data is not aggregated. | ||
| 2513 | Also the 'Count' column, which would be always 1, is replaced by the 'Call Time'. | ||
| 2514 | <h2 id=allbranches>1.3 All branches</h2> | ||
| 2317 | The All branches report displays all branches in chronological order. | 2515 | The All branches report displays all branches in chronological order. |
| 2318 | Not all data is fetched immediately. More records can be fetched using the Fetch bar provided. | 2516 | Not all data is fetched immediately. More records can be fetched using the Fetch bar provided. |
| 2319 | <h3>Disassembly</h3> | 2517 | <h3>Disassembly</h3> |
| @@ -2339,10 +2537,10 @@ sudo ldconfig | |||
| 2339 | Ctrl-F displays a Find bar which finds substrings by either an exact match or a regular expression match. | 2537 | Ctrl-F displays a Find bar which finds substrings by either an exact match or a regular expression match. |
| 2340 | Refer to Python documentation for the regular expression syntax. | 2538 | Refer to Python documentation for the regular expression syntax. |
| 2341 | All columns are searched, but only currently fetched rows are searched. | 2539 | All columns are searched, but only currently fetched rows are searched. |
| 2342 | <h2 id=selectedbranches>1.3 Selected branches</h2> | 2540 | <h2 id=selectedbranches>1.4 Selected branches</h2> |
| 2343 | This is the same as the <a href=#allbranches>All branches</a> report but with the data reduced | 2541 | This is the same as the <a href=#allbranches>All branches</a> report but with the data reduced |
| 2344 | by various selection criteria. A dialog box displays available criteria which are AND'ed together. | 2542 | by various selection criteria. A dialog box displays available criteria which are AND'ed together. |
| 2345 | <h3>1.3.1 Time ranges</h3> | 2543 | <h3>1.4.1 Time ranges</h3> |
| 2346 | The time ranges hint text shows the total time range. Relative time ranges can also be entered in | 2544 | The time ranges hint text shows the total time range. Relative time ranges can also be entered in |
| 2347 | ms, us or ns. Also, negative values are relative to the end of trace. Examples: | 2545 | ms, us or ns. Also, negative values are relative to the end of trace. Examples: |
| 2348 | <pre> | 2546 | <pre> |
| @@ -2353,7 +2551,7 @@ ms, us or ns. Also, negative values are relative to the end of trace. Examples: | |||
| 2353 | -10ms- The last 10ms | 2551 | -10ms- The last 10ms |
| 2354 | </pre> | 2552 | </pre> |
| 2355 | N.B. Due to the granularity of timestamps, there could be no branches in any given time range. | 2553 | N.B. Due to the granularity of timestamps, there could be no branches in any given time range. |
| 2356 | <h2 id=topcallsbyelapsedtime>1.4 Top calls by elapsed time</h2> | 2554 | <h2 id=topcallsbyelapsedtime>1.5 Top calls by elapsed time</h2> |
| 2357 | The Top calls by elapsed time report displays calls in descending order of time elapsed between when the function was called and when it returned. | 2555 | The Top calls by elapsed time report displays calls in descending order of time elapsed between when the function was called and when it returned. |
| 2358 | The data is reduced by various selection criteria. A dialog box displays available criteria which are AND'ed together. | 2556 | The data is reduced by various selection criteria. A dialog box displays available criteria which are AND'ed together. |
| 2359 | If not all data is fetched, a Fetch bar is provided. Ctrl-F displays a Find bar. | 2557 | If not all data is fetched, a Fetch bar is provided. Ctrl-F displays a Find bar. |
| @@ -2489,6 +2687,9 @@ class MainWindow(QMainWindow): | |||
| 2489 | if IsSelectable(glb.db, "calls"): | 2687 | if IsSelectable(glb.db, "calls"): |
| 2490 | reports_menu.addAction(CreateAction("Context-Sensitive Call &Graph", "Create a new window containing a context-sensitive call graph", self.NewCallGraph, self)) | 2688 | reports_menu.addAction(CreateAction("Context-Sensitive Call &Graph", "Create a new window containing a context-sensitive call graph", self.NewCallGraph, self)) |
| 2491 | 2689 | ||
| 2690 | if IsSelectable(glb.db, "calls", "WHERE parent_id >= 0"): | ||
| 2691 | reports_menu.addAction(CreateAction("Call &Tree", "Create a new window containing a call tree", self.NewCallTree, self)) | ||
| 2692 | |||
| 2492 | self.EventMenu(GetEventList(glb.db), reports_menu) | 2693 | self.EventMenu(GetEventList(glb.db), reports_menu) |
| 2493 | 2694 | ||
| 2494 | if IsSelectable(glb.db, "calls"): | 2695 | if IsSelectable(glb.db, "calls"): |
| @@ -2549,6 +2750,9 @@ class MainWindow(QMainWindow): | |||
| 2549 | def NewCallGraph(self): | 2750 | def NewCallGraph(self): |
| 2550 | CallGraphWindow(self.glb, self) | 2751 | CallGraphWindow(self.glb, self) |
| 2551 | 2752 | ||
| 2753 | def NewCallTree(self): | ||
| 2754 | CallTreeWindow(self.glb, self) | ||
| 2755 | |||
| 2552 | def NewTopCalls(self): | 2756 | def NewTopCalls(self): |
| 2553 | dialog = TopCallsDialog(self.glb, self) | 2757 | dialog = TopCallsDialog(self.glb, self) |
| 2554 | ret = dialog.exec_() | 2758 | ret = dialog.exec_() |
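
The refactoring above turns TreeModel into a small template-method base: it stores glb and asks the concrete model for its root via GetRoot(), so CallGraphModel, CallTreeModel and BranchModel differ only in their root item and the queries they run. A stripped-down sketch of that shape, with the Qt base class and the real item types replaced by stand-ins:

```python
class TreeModel(object):
    def __init__(self, glb):
        self.glb = glb
        self.root = self.GetRoot()  # supplied by the concrete model

    def GetRoot(self):
        raise NotImplementedError

class CallGraphModel(TreeModel):
    def GetRoot(self):
        return "CallGraphRootItem(glb)"  # placeholder for the real aggregated root

class CallTreeModel(TreeModel):
    def GetRoot(self):
        return "CallTreeRootItem(glb)"   # placeholder for the real per-call root

print(CallTreeModel(glb=None).root)
```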
diff --git a/tools/perf/scripts/python/failed-syscalls-by-pid.py b/tools/perf/scripts/python/failed-syscalls-by-pid.py
index 3648e8b986ec..310efe5e7e23 100644
--- a/tools/perf/scripts/python/failed-syscalls-by-pid.py
+++ b/tools/perf/scripts/python/failed-syscalls-by-pid.py
| @@ -58,22 +58,22 @@ def syscalls__sys_exit(event_name, context, common_cpu, | |||
| 58 | raw_syscalls__sys_exit(**locals()) | 58 | raw_syscalls__sys_exit(**locals()) |
| 59 | 59 | ||
| 60 | def print_error_totals(): | 60 | def print_error_totals(): |
| 61 | if for_comm is not None: | 61 | if for_comm is not None: |
| 62 | print("\nsyscall errors for %s:\n" % (for_comm)) | 62 | print("\nsyscall errors for %s:\n" % (for_comm)) |
| 63 | else: | 63 | else: |
| 64 | print("\nsyscall errors:\n") | 64 | print("\nsyscall errors:\n") |
| 65 | 65 | ||
| 66 | print("%-30s %10s" % ("comm [pid]", "count")) | 66 | print("%-30s %10s" % ("comm [pid]", "count")) |
| 67 | print("%-30s %10s" % ("------------------------------", "----------")) | 67 | print("%-30s %10s" % ("------------------------------", "----------")) |
| 68 | 68 | ||
| 69 | comm_keys = syscalls.keys() | 69 | comm_keys = syscalls.keys() |
| 70 | for comm in comm_keys: | 70 | for comm in comm_keys: |
| 71 | pid_keys = syscalls[comm].keys() | 71 | pid_keys = syscalls[comm].keys() |
| 72 | for pid in pid_keys: | 72 | for pid in pid_keys: |
| 73 | print("\n%s [%d]" % (comm, pid)) | 73 | print("\n%s [%d]" % (comm, pid)) |
| 74 | id_keys = syscalls[comm][pid].keys() | 74 | id_keys = syscalls[comm][pid].keys() |
| 75 | for id in id_keys: | 75 | for id in id_keys: |
| 76 | print(" syscall: %-16s" % syscall_name(id)) | 76 | print(" syscall: %-16s" % syscall_name(id)) |
| 77 | ret_keys = syscalls[comm][pid][id].keys() | 77 | ret_keys = syscalls[comm][pid][id].keys() |
| 78 | for ret, val in sorted(syscalls[comm][pid][id].items(), key = lambda kv: (kv[1], kv[0]), reverse = True): | 78 | for ret, val in sorted(syscalls[comm][pid][id].items(), key = lambda kv: (kv[1], kv[0]), reverse = True): |
| 79 | print(" err = %-20s %10d" % (strerror(ret), val)) | 79 | print(" err = %-20s %10d" % (strerror(ret), val)) |
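
The only visible change to failed-syscalls-by-pid.py here is whitespace (part of the mixed-indentation cleanup), but the sorting idiom it keeps is worth a note: sorted() over the items with a (count, key) tuple and reverse=True lists the most frequent errors first. A tiny sketch with invented error counts:

```python
# Order syscall errors by count (highest first), then by return value.
errors = {-2: 7, -13: 30, -11: 30}  # hypothetical errno -> count
for ret, val in sorted(errors.items(), key=lambda kv: (kv[1], kv[0]), reverse=True):
    print("err = %-4d %10d" % (ret, val))
```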
diff --git a/tools/perf/scripts/python/futex-contention.py b/tools/perf/scripts/python/futex-contention.py
index 0f5cf437b602..0c4841acf75d 100644
--- a/tools/perf/scripts/python/futex-contention.py
+++ b/tools/perf/scripts/python/futex-contention.py
| @@ -10,6 +10,8 @@ | |||
| 10 | # | 10 | # |
| 11 | # Measures futex contention | 11 | # Measures futex contention |
| 12 | 12 | ||
| 13 | from __future__ import print_function | ||
| 14 | |||
| 13 | import os, sys | 15 | import os, sys |
| 14 | sys.path.append(os.environ['PERF_EXEC_PATH'] + '/scripts/python/Perf-Trace-Util/lib/Perf/Trace') | 16 | sys.path.append(os.environ['PERF_EXEC_PATH'] + '/scripts/python/Perf-Trace-Util/lib/Perf/Trace') |
| 15 | from Util import * | 17 | from Util import * |
| @@ -33,18 +35,18 @@ def syscalls__sys_enter_futex(event, ctxt, cpu, s, ns, tid, comm, callchain, | |||
| 33 | 35 | ||
| 34 | def syscalls__sys_exit_futex(event, ctxt, cpu, s, ns, tid, comm, callchain, | 36 | def syscalls__sys_exit_futex(event, ctxt, cpu, s, ns, tid, comm, callchain, |
| 35 | nr, ret): | 37 | nr, ret): |
| 36 | if thread_blocktime.has_key(tid): | 38 | if tid in thread_blocktime: |
| 37 | elapsed = nsecs(s, ns) - thread_blocktime[tid] | 39 | elapsed = nsecs(s, ns) - thread_blocktime[tid] |
| 38 | add_stats(lock_waits, (tid, thread_thislock[tid]), elapsed) | 40 | add_stats(lock_waits, (tid, thread_thislock[tid]), elapsed) |
| 39 | del thread_blocktime[tid] | 41 | del thread_blocktime[tid] |
| 40 | del thread_thislock[tid] | 42 | del thread_thislock[tid] |
| 41 | 43 | ||
| 42 | def trace_begin(): | 44 | def trace_begin(): |
| 43 | print "Press control+C to stop and show the summary" | 45 | print("Press control+C to stop and show the summary") |
| 44 | 46 | ||
| 45 | def trace_end(): | 47 | def trace_end(): |
| 46 | for (tid, lock) in lock_waits: | 48 | for (tid, lock) in lock_waits: |
| 47 | min, max, avg, count = lock_waits[tid, lock] | 49 | min, max, avg, count = lock_waits[tid, lock] |
| 48 | print "%s[%d] lock %x contended %d times, %d avg ns" % \ | 50 | print("%s[%d] lock %x contended %d times, %d avg ns" % |
| 49 | (process_names[tid], tid, lock, count, avg) | 51 | (process_names[tid], tid, lock, count, avg)) |
| 50 | 52 | ||
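The futex-contention hunk applies two of the recurring Python 3 conversions in this series: `dict.has_key()` becomes an `in` test, and the `print` statement becomes a `print()` call. A self-contained sketch of both, with made-up tid, timestamp, and lock values standing in for the script's per-thread tables:

```python
from __future__ import print_function   # print() is a function on Python 2 as well

# made-up state standing in for the script's per-thread tables
thread_blocktime = {1234: 1000}
thread_thislock = {1234: 0x7f00c0ffee00}

tid, now = 1234, 2500
# dict.has_key() no longer exists in Python 3; "in" works on both versions
if tid in thread_blocktime:
    elapsed = now - thread_blocktime[tid]
    # Python 2 print statement becomes a print() call
    print("tid %d waited %d ns on lock %x" %
          (tid, elapsed, thread_thislock[tid]))
    del thread_blocktime[tid]
    del thread_thislock[tid]
```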
diff --git a/tools/perf/scripts/python/intel-pt-events.py b/tools/perf/scripts/python/intel-pt-events.py index b19172d673af..a73847c8f548 100644 --- a/tools/perf/scripts/python/intel-pt-events.py +++ b/tools/perf/scripts/python/intel-pt-events.py | |||
| @@ -10,6 +10,8 @@ | |||
| 10 | # FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | 10 | # FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for |
| 11 | # more details. | 11 | # more details. |
| 12 | 12 | ||
| 13 | from __future__ import print_function | ||
| 14 | |||
| 13 | import os | 15 | import os |
| 14 | import sys | 16 | import sys |
| 15 | import struct | 17 | import struct |
| @@ -22,34 +24,34 @@ sys.path.append(os.environ['PERF_EXEC_PATH'] + \ | |||
| 22 | #from Core import * | 24 | #from Core import * |
| 23 | 25 | ||
| 24 | def trace_begin(): | 26 | def trace_begin(): |
| 25 | print "Intel PT Power Events and PTWRITE" | 27 | print("Intel PT Power Events and PTWRITE") |
| 26 | 28 | ||
| 27 | def trace_end(): | 29 | def trace_end(): |
| 28 | print "End" | 30 | print("End") |
| 29 | 31 | ||
| 30 | def trace_unhandled(event_name, context, event_fields_dict): | 32 | def trace_unhandled(event_name, context, event_fields_dict): |
| 31 | print ' '.join(['%s=%s'%(k,str(v))for k,v in sorted(event_fields_dict.items())]) | 33 | print(' '.join(['%s=%s'%(k,str(v))for k,v in sorted(event_fields_dict.items())])) |
| 32 | 34 | ||
| 33 | def print_ptwrite(raw_buf): | 35 | def print_ptwrite(raw_buf): |
| 34 | data = struct.unpack_from("<IQ", raw_buf) | 36 | data = struct.unpack_from("<IQ", raw_buf) |
| 35 | flags = data[0] | 37 | flags = data[0] |
| 36 | payload = data[1] | 38 | payload = data[1] |
| 37 | exact_ip = flags & 1 | 39 | exact_ip = flags & 1 |
| 38 | print "IP: %u payload: %#x" % (exact_ip, payload), | 40 | print("IP: %u payload: %#x" % (exact_ip, payload), end=' ') |
| 39 | 41 | ||
| 40 | def print_cbr(raw_buf): | 42 | def print_cbr(raw_buf): |
| 41 | data = struct.unpack_from("<BBBBII", raw_buf) | 43 | data = struct.unpack_from("<BBBBII", raw_buf) |
| 42 | cbr = data[0] | 44 | cbr = data[0] |
| 43 | f = (data[4] + 500) / 1000 | 45 | f = (data[4] + 500) / 1000 |
| 44 | p = ((cbr * 1000 / data[2]) + 5) / 10 | 46 | p = ((cbr * 1000 / data[2]) + 5) / 10 |
| 45 | print "%3u freq: %4u MHz (%3u%%)" % (cbr, f, p), | 47 | print("%3u freq: %4u MHz (%3u%%)" % (cbr, f, p), end=' ') |
| 46 | 48 | ||
| 47 | def print_mwait(raw_buf): | 49 | def print_mwait(raw_buf): |
| 48 | data = struct.unpack_from("<IQ", raw_buf) | 50 | data = struct.unpack_from("<IQ", raw_buf) |
| 49 | payload = data[1] | 51 | payload = data[1] |
| 50 | hints = payload & 0xff | 52 | hints = payload & 0xff |
| 51 | extensions = (payload >> 32) & 0x3 | 53 | extensions = (payload >> 32) & 0x3 |
| 52 | print "hints: %#x extensions: %#x" % (hints, extensions), | 54 | print("hints: %#x extensions: %#x" % (hints, extensions), end=' ') |
| 53 | 55 | ||
| 54 | def print_pwre(raw_buf): | 56 | def print_pwre(raw_buf): |
| 55 | data = struct.unpack_from("<IQ", raw_buf) | 57 | data = struct.unpack_from("<IQ", raw_buf) |
| @@ -57,13 +59,14 @@ def print_pwre(raw_buf): | |||
| 57 | hw = (payload >> 7) & 1 | 59 | hw = (payload >> 7) & 1 |
| 58 | cstate = (payload >> 12) & 0xf | 60 | cstate = (payload >> 12) & 0xf |
| 59 | subcstate = (payload >> 8) & 0xf | 61 | subcstate = (payload >> 8) & 0xf |
| 60 | print "hw: %u cstate: %u sub-cstate: %u" % (hw, cstate, subcstate), | 62 | print("hw: %u cstate: %u sub-cstate: %u" % (hw, cstate, subcstate), |
| 63 | end=' ') | ||
| 61 | 64 | ||
| 62 | def print_exstop(raw_buf): | 65 | def print_exstop(raw_buf): |
| 63 | data = struct.unpack_from("<I", raw_buf) | 66 | data = struct.unpack_from("<I", raw_buf) |
| 64 | flags = data[0] | 67 | flags = data[0] |
| 65 | exact_ip = flags & 1 | 68 | exact_ip = flags & 1 |
| 66 | print "IP: %u" % (exact_ip), | 69 | print("IP: %u" % (exact_ip), end=' ') |
| 67 | 70 | ||
| 68 | def print_pwrx(raw_buf): | 71 | def print_pwrx(raw_buf): |
| 69 | data = struct.unpack_from("<IQ", raw_buf) | 72 | data = struct.unpack_from("<IQ", raw_buf) |
| @@ -71,36 +74,39 @@ def print_pwrx(raw_buf): | |||
| 71 | deepest_cstate = payload & 0xf | 74 | deepest_cstate = payload & 0xf |
| 72 | last_cstate = (payload >> 4) & 0xf | 75 | last_cstate = (payload >> 4) & 0xf |
| 73 | wake_reason = (payload >> 8) & 0xf | 76 | wake_reason = (payload >> 8) & 0xf |
| 74 | print "deepest cstate: %u last cstate: %u wake reason: %#x" % (deepest_cstate, last_cstate, wake_reason), | 77 | print("deepest cstate: %u last cstate: %u wake reason: %#x" % |
| 78 | (deepest_cstate, last_cstate, wake_reason), end=' ') | ||
| 75 | 79 | ||
| 76 | def print_common_start(comm, sample, name): | 80 | def print_common_start(comm, sample, name): |
| 77 | ts = sample["time"] | 81 | ts = sample["time"] |
| 78 | cpu = sample["cpu"] | 82 | cpu = sample["cpu"] |
| 79 | pid = sample["pid"] | 83 | pid = sample["pid"] |
| 80 | tid = sample["tid"] | 84 | tid = sample["tid"] |
| 81 | print "%16s %5u/%-5u [%03u] %9u.%09u %7s:" % (comm, pid, tid, cpu, ts / 1000000000, ts %1000000000, name), | 85 | print("%16s %5u/%-5u [%03u] %9u.%09u %7s:" % |
| 86 | (comm, pid, tid, cpu, ts / 1000000000, ts %1000000000, name), | ||
| 87 | end=' ') | ||
| 82 | 88 | ||
| 83 | def print_common_ip(sample, symbol, dso): | 89 | def print_common_ip(sample, symbol, dso): |
| 84 | ip = sample["ip"] | 90 | ip = sample["ip"] |
| 85 | print "%16x %s (%s)" % (ip, symbol, dso) | 91 | print("%16x %s (%s)" % (ip, symbol, dso)) |
| 86 | 92 | ||
| 87 | def process_event(param_dict): | 93 | def process_event(param_dict): |
| 88 | event_attr = param_dict["attr"] | 94 | event_attr = param_dict["attr"] |
| 89 | sample = param_dict["sample"] | 95 | sample = param_dict["sample"] |
| 90 | raw_buf = param_dict["raw_buf"] | 96 | raw_buf = param_dict["raw_buf"] |
| 91 | comm = param_dict["comm"] | 97 | comm = param_dict["comm"] |
| 92 | name = param_dict["ev_name"] | 98 | name = param_dict["ev_name"] |
| 93 | 99 | ||
| 94 | # Symbol and dso info are not always resolved | 100 | # Symbol and dso info are not always resolved |
| 95 | if (param_dict.has_key("dso")): | 101 | if "dso" in param_dict: |
| 96 | dso = param_dict["dso"] | 102 | dso = param_dict["dso"] |
| 97 | else: | 103 | else: |
| 98 | dso = "[unknown]" | 104 | dso = "[unknown]" |
| 99 | 105 | ||
| 100 | if (param_dict.has_key("symbol")): | 106 | if "symbol" in param_dict: |
| 101 | symbol = param_dict["symbol"] | 107 | symbol = param_dict["symbol"] |
| 102 | else: | 108 | else: |
| 103 | symbol = "[unknown]" | 109 | symbol = "[unknown]" |
| 104 | 110 | ||
| 105 | if name == "ptwrite": | 111 | if name == "ptwrite": |
| 106 | print_common_start(comm, sample, name) | 112 | print_common_start(comm, sample, name) |
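The intel-pt-events changes keep the same `struct.unpack_from()` decoding and only swap the Python 2 trailing-comma `print` for `print(..., end=' ')`. A small sketch of the `print_ptwrite()` path, fed a hand-built buffer; the byte layout used here is only what the script's `"<IQ"` format string implies, and the values are made up:

```python
from __future__ import print_function
import struct

# Hand-built buffer matching the "<IQ" format the script unpacks:
# a 32-bit flags word followed by a 64-bit payload (made-up values).
raw_buf = struct.pack("<IQ", 1, 0xcafef00d)

flags, payload = struct.unpack_from("<IQ", raw_buf)
exact_ip = flags & 1
# end=' ' reproduces the Python 2 trailing-comma behaviour (no newline)
print("IP: %u payload: %#x" % (exact_ip, payload), end=' ')
print()   # finish the line
```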
diff --git a/tools/perf/scripts/python/mem-phys-addr.py b/tools/perf/scripts/python/mem-phys-addr.py index fb0bbcbfa0f0..1f332e72b9b0 100644 --- a/tools/perf/scripts/python/mem-phys-addr.py +++ b/tools/perf/scripts/python/mem-phys-addr.py | |||
| @@ -44,12 +44,13 @@ def print_memory_type(): | |||
| 44 | print("%-40s %10s %10s\n" % ("Memory type", "count", "percentage"), end='') | 44 | print("%-40s %10s %10s\n" % ("Memory type", "count", "percentage"), end='') |
| 45 | print("%-40s %10s %10s\n" % ("----------------------------------------", | 45 | print("%-40s %10s %10s\n" % ("----------------------------------------", |
| 46 | "-----------", "-----------"), | 46 | "-----------", "-----------"), |
| 47 | end=''); | 47 | end=''); |
| 48 | total = sum(load_mem_type_cnt.values()) | 48 | total = sum(load_mem_type_cnt.values()) |
| 49 | for mem_type, count in sorted(load_mem_type_cnt.most_common(), \ | 49 | for mem_type, count in sorted(load_mem_type_cnt.most_common(), \ |
| 50 | key = lambda kv: (kv[1], kv[0]), reverse = True): | 50 | key = lambda kv: (kv[1], kv[0]), reverse = True): |
| 51 | print("%-40s %10d %10.1f%%\n" % (mem_type, count, 100 * count / total), | 51 | print("%-40s %10d %10.1f%%\n" % |
| 52 | end='') | 52 | (mem_type, count, 100 * count / total), |
| 53 | end='') | ||
| 53 | 54 | ||
| 54 | def trace_begin(): | 55 | def trace_begin(): |
| 55 | parse_iomem() | 56 | parse_iomem() |
diff --git a/tools/perf/scripts/python/net_dropmonitor.py b/tools/perf/scripts/python/net_dropmonitor.py index 212557a02c50..101059971738 100755 --- a/tools/perf/scripts/python/net_dropmonitor.py +++ b/tools/perf/scripts/python/net_dropmonitor.py | |||
| @@ -7,7 +7,7 @@ import os | |||
| 7 | import sys | 7 | import sys |
| 8 | 8 | ||
| 9 | sys.path.append(os.environ['PERF_EXEC_PATH'] + \ | 9 | sys.path.append(os.environ['PERF_EXEC_PATH'] + \ |
| 10 | '/scripts/python/Perf-Trace-Util/lib/Perf/Trace') | 10 | '/scripts/python/Perf-Trace-Util/lib/Perf/Trace') |
| 11 | 11 | ||
| 12 | from perf_trace_context import * | 12 | from perf_trace_context import * |
| 13 | from Core import * | 13 | from Core import * |
diff --git a/tools/perf/scripts/python/netdev-times.py b/tools/perf/scripts/python/netdev-times.py index 267bda49325d..ea0c8b90a783 100644 --- a/tools/perf/scripts/python/netdev-times.py +++ b/tools/perf/scripts/python/netdev-times.py | |||
| @@ -124,14 +124,16 @@ def print_receive(hunk): | |||
| 124 | event = event_list[i] | 124 | event = event_list[i] |
| 125 | if event['event_name'] == 'napi_poll': | 125 | if event['event_name'] == 'napi_poll': |
| 126 | print(PF_NAPI_POLL % | 126 | print(PF_NAPI_POLL % |
| 127 | (diff_msec(base_t, event['event_t']), event['dev'])) | 127 | (diff_msec(base_t, event['event_t']), |
| 128 | event['dev'])) | ||
| 128 | if i == len(event_list) - 1: | 129 | if i == len(event_list) - 1: |
| 129 | print("") | 130 | print("") |
| 130 | else: | 131 | else: |
| 131 | print(PF_JOINT) | 132 | print(PF_JOINT) |
| 132 | else: | 133 | else: |
| 133 | print(PF_NET_RECV % | 134 | print(PF_NET_RECV % |
| 134 | (diff_msec(base_t, event['event_t']), event['skbaddr'], | 135 | (diff_msec(base_t, event['event_t']), |
| 136 | event['skbaddr'], | ||
| 135 | event['len'])) | 137 | event['len'])) |
| 136 | if 'comm' in event.keys(): | 138 | if 'comm' in event.keys(): |
| 137 | print(PF_WJOINT) | 139 | print(PF_WJOINT) |
| @@ -256,7 +258,7 @@ def irq__irq_handler_exit(name, context, cpu, sec, nsec, pid, comm, callchain, i | |||
| 256 | all_event_list.append(event_info) | 258 | all_event_list.append(event_info) |
| 257 | 259 | ||
| 258 | def napi__napi_poll(name, context, cpu, sec, nsec, pid, comm, callchain, napi, | 260 | def napi__napi_poll(name, context, cpu, sec, nsec, pid, comm, callchain, napi, |
| 259 | dev_name, work=None, budget=None): | 261 | dev_name, work=None, budget=None): |
| 260 | event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, | 262 | event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, |
| 261 | napi, dev_name, work, budget) | 263 | napi, dev_name, work, budget) |
| 262 | all_event_list.append(event_info) | 264 | all_event_list.append(event_info) |
| @@ -353,7 +355,7 @@ def handle_irq_softirq_exit(event_info): | |||
| 353 | if irq_list == [] or event_list == 0: | 355 | if irq_list == [] or event_list == 0: |
| 354 | return | 356 | return |
| 355 | rec_data = {'sirq_ent_t':sirq_ent_t, 'sirq_ext_t':time, | 357 | rec_data = {'sirq_ent_t':sirq_ent_t, 'sirq_ext_t':time, |
| 356 | 'irq_list':irq_list, 'event_list':event_list} | 358 | 'irq_list':irq_list, 'event_list':event_list} |
| 357 | # merge information realted to a NET_RX softirq | 359 | # merge information realted to a NET_RX softirq |
| 358 | receive_hunk_list.append(rec_data) | 360 | receive_hunk_list.append(rec_data) |
| 359 | 361 | ||
| @@ -390,7 +392,7 @@ def handle_netif_receive_skb(event_info): | |||
| 390 | skbaddr, skblen, dev_name) = event_info | 392 | skbaddr, skblen, dev_name) = event_info |
| 391 | if cpu in net_rx_dic.keys(): | 393 | if cpu in net_rx_dic.keys(): |
| 392 | rec_data = {'event_name':'netif_receive_skb', | 394 | rec_data = {'event_name':'netif_receive_skb', |
| 393 | 'event_t':time, 'skbaddr':skbaddr, 'len':skblen} | 395 | 'event_t':time, 'skbaddr':skbaddr, 'len':skblen} |
| 394 | event_list = net_rx_dic[cpu]['event_list'] | 396 | event_list = net_rx_dic[cpu]['event_list'] |
| 395 | event_list.append(rec_data) | 397 | event_list.append(rec_data) |
| 396 | rx_skb_list.insert(0, rec_data) | 398 | rx_skb_list.insert(0, rec_data) |
diff --git a/tools/perf/scripts/python/sched-migration.py b/tools/perf/scripts/python/sched-migration.py index 3984bf51f3c5..8196e3087c9e 100644 --- a/tools/perf/scripts/python/sched-migration.py +++ b/tools/perf/scripts/python/sched-migration.py | |||
| @@ -14,10 +14,10 @@ import sys | |||
| 14 | 14 | ||
| 15 | from collections import defaultdict | 15 | from collections import defaultdict |
| 16 | try: | 16 | try: |
| 17 | from UserList import UserList | 17 | from UserList import UserList |
| 18 | except ImportError: | 18 | except ImportError: |
| 19 | # Python 3: UserList moved to the collections package | 19 | # Python 3: UserList moved to the collections package |
| 20 | from collections import UserList | 20 | from collections import UserList |
| 21 | 21 | ||
| 22 | sys.path.append(os.environ['PERF_EXEC_PATH'] + \ | 22 | sys.path.append(os.environ['PERF_EXEC_PATH'] + \ |
| 23 | '/scripts/python/Perf-Trace-Util/lib/Perf/Trace') | 23 | '/scripts/python/Perf-Trace-Util/lib/Perf/Trace') |
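The sched-migration hunk is pure re-indentation of an existing dual-import idiom, which sctop.py below also uses for `thread`/`_thread`: try the Python 2 location first and fall back to the Python 3 one. A short sketch of the pattern, with a throwaway subclass just to show the imported name is usable:

```python
# Try the Python 2 module first, fall back to the Python 3 location.
try:
    from UserList import UserList            # Python 2
except ImportError:
    from collections import UserList         # Python 3: moved into collections

class EventList(UserList):
    """Trivial subclass only to demonstrate the imported class works."""
    pass

events = EventList()
events.append("sched_migrate_task")
print(len(events))    # -> 1
```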
diff --git a/tools/perf/scripts/python/sctop.py b/tools/perf/scripts/python/sctop.py index 987ffae7c8ca..6e0278dcb092 100644 --- a/tools/perf/scripts/python/sctop.py +++ b/tools/perf/scripts/python/sctop.py | |||
| @@ -13,9 +13,9 @@ from __future__ import print_function | |||
| 13 | import os, sys, time | 13 | import os, sys, time |
| 14 | 14 | ||
| 15 | try: | 15 | try: |
| 16 | import thread | 16 | import thread |
| 17 | except ImportError: | 17 | except ImportError: |
| 18 | import _thread as thread | 18 | import _thread as thread |
| 19 | 19 | ||
| 20 | sys.path.append(os.environ['PERF_EXEC_PATH'] + \ | 20 | sys.path.append(os.environ['PERF_EXEC_PATH'] + \ |
| 21 | '/scripts/python/Perf-Trace-Util/lib/Perf/Trace') | 21 | '/scripts/python/Perf-Trace-Util/lib/Perf/Trace') |
| @@ -75,11 +75,12 @@ def print_syscall_totals(interval): | |||
| 75 | 75 | ||
| 76 | print("%-40s %10s" % ("event", "count")) | 76 | print("%-40s %10s" % ("event", "count")) |
| 77 | print("%-40s %10s" % | 77 | print("%-40s %10s" % |
| 78 | ("----------------------------------------", | 78 | ("----------------------------------------", |
| 79 | "----------")) | 79 | "----------")) |
| 80 | 80 | ||
| 81 | for id, val in sorted(syscalls.items(), key = lambda kv: (kv[1], kv[0]), \ | 81 | for id, val in sorted(syscalls.items(), |
| 82 | reverse = True): | 82 | key = lambda kv: (kv[1], kv[0]), |
| 83 | reverse = True): | ||
| 83 | try: | 84 | try: |
| 84 | print("%-40s %10d" % (syscall_name(id), val)) | 85 | print("%-40s %10d" % (syscall_name(id), val)) |
| 85 | except TypeError: | 86 | except TypeError: |
diff --git a/tools/perf/scripts/python/stackcollapse.py b/tools/perf/scripts/python/stackcollapse.py index 5e703efaddcc..b1c4def1410a 100755 --- a/tools/perf/scripts/python/stackcollapse.py +++ b/tools/perf/scripts/python/stackcollapse.py | |||
| @@ -27,7 +27,7 @@ from collections import defaultdict | |||
| 27 | from optparse import OptionParser, make_option | 27 | from optparse import OptionParser, make_option |
| 28 | 28 | ||
| 29 | sys.path.append(os.environ['PERF_EXEC_PATH'] + \ | 29 | sys.path.append(os.environ['PERF_EXEC_PATH'] + \ |
| 30 | '/scripts/python/Perf-Trace-Util/lib/Perf/Trace') | 30 | '/scripts/python/Perf-Trace-Util/lib/Perf/Trace') |
| 31 | 31 | ||
| 32 | from perf_trace_context import * | 32 | from perf_trace_context import * |
| 33 | from Core import * | 33 | from Core import * |
diff --git a/tools/perf/scripts/python/syscall-counts-by-pid.py b/tools/perf/scripts/python/syscall-counts-by-pid.py index 42782487b0e9..f254e40c6f0f 100644 --- a/tools/perf/scripts/python/syscall-counts-by-pid.py +++ b/tools/perf/scripts/python/syscall-counts-by-pid.py | |||
| @@ -39,11 +39,10 @@ def trace_end(): | |||
| 39 | print_syscall_totals() | 39 | print_syscall_totals() |
| 40 | 40 | ||
| 41 | def raw_syscalls__sys_enter(event_name, context, common_cpu, | 41 | def raw_syscalls__sys_enter(event_name, context, common_cpu, |
| 42 | common_secs, common_nsecs, common_pid, common_comm, | 42 | common_secs, common_nsecs, common_pid, common_comm, |
| 43 | common_callchain, id, args): | 43 | common_callchain, id, args): |
| 44 | |||
| 45 | if (for_comm and common_comm != for_comm) or \ | 44 | if (for_comm and common_comm != for_comm) or \ |
| 46 | (for_pid and common_pid != for_pid ): | 45 | (for_pid and common_pid != for_pid ): |
| 47 | return | 46 | return |
| 48 | try: | 47 | try: |
| 49 | syscalls[common_comm][common_pid][id] += 1 | 48 | syscalls[common_comm][common_pid][id] += 1 |
| @@ -51,26 +50,26 @@ def raw_syscalls__sys_enter(event_name, context, common_cpu, | |||
| 51 | syscalls[common_comm][common_pid][id] = 1 | 50 | syscalls[common_comm][common_pid][id] = 1 |
| 52 | 51 | ||
| 53 | def syscalls__sys_enter(event_name, context, common_cpu, | 52 | def syscalls__sys_enter(event_name, context, common_cpu, |
| 54 | common_secs, common_nsecs, common_pid, common_comm, | 53 | common_secs, common_nsecs, common_pid, common_comm, |
| 55 | id, args): | 54 | id, args): |
| 56 | raw_syscalls__sys_enter(**locals()) | 55 | raw_syscalls__sys_enter(**locals()) |
| 57 | 56 | ||
| 58 | def print_syscall_totals(): | 57 | def print_syscall_totals(): |
| 59 | if for_comm is not None: | 58 | if for_comm is not None: |
| 60 | print("\nsyscall events for %s:\n" % (for_comm)) | 59 | print("\nsyscall events for %s:\n" % (for_comm)) |
| 61 | else: | 60 | else: |
| 62 | print("\nsyscall events by comm/pid:\n") | 61 | print("\nsyscall events by comm/pid:\n") |
| 63 | 62 | ||
| 64 | print("%-40s %10s" % ("comm [pid]/syscalls", "count")) | 63 | print("%-40s %10s" % ("comm [pid]/syscalls", "count")) |
| 65 | print("%-40s %10s" % ("----------------------------------------", | 64 | print("%-40s %10s" % ("----------------------------------------", |
| 66 | "----------")) | 65 | "----------")) |
| 67 | 66 | ||
| 68 | comm_keys = syscalls.keys() | 67 | comm_keys = syscalls.keys() |
| 69 | for comm in comm_keys: | 68 | for comm in comm_keys: |
| 70 | pid_keys = syscalls[comm].keys() | 69 | pid_keys = syscalls[comm].keys() |
| 71 | for pid in pid_keys: | 70 | for pid in pid_keys: |
| 72 | print("\n%s [%d]" % (comm, pid)) | 71 | print("\n%s [%d]" % (comm, pid)) |
| 73 | id_keys = syscalls[comm][pid].keys() | 72 | id_keys = syscalls[comm][pid].keys() |
| 74 | for id, val in sorted(syscalls[comm][pid].items(), \ | 73 | for id, val in sorted(syscalls[comm][pid].items(), |
| 75 | key = lambda kv: (kv[1], kv[0]), reverse = True): | 74 | key = lambda kv: (kv[1], kv[0]), reverse = True): |
| 76 | print(" %-38s %10d" % (syscall_name(id), val)) | 75 | print(" %-38s %10d" % (syscall_name(id), val)) |
diff --git a/tools/perf/scripts/python/syscall-counts.py b/tools/perf/scripts/python/syscall-counts.py index 0ebd89cfd42c..8adb95ff1664 100644 --- a/tools/perf/scripts/python/syscall-counts.py +++ b/tools/perf/scripts/python/syscall-counts.py | |||
| @@ -36,8 +36,8 @@ def trace_end(): | |||
| 36 | print_syscall_totals() | 36 | print_syscall_totals() |
| 37 | 37 | ||
| 38 | def raw_syscalls__sys_enter(event_name, context, common_cpu, | 38 | def raw_syscalls__sys_enter(event_name, context, common_cpu, |
| 39 | common_secs, common_nsecs, common_pid, common_comm, | 39 | common_secs, common_nsecs, common_pid, common_comm, |
| 40 | common_callchain, id, args): | 40 | common_callchain, id, args): |
| 41 | if for_comm is not None: | 41 | if for_comm is not None: |
| 42 | if common_comm != for_comm: | 42 | if common_comm != for_comm: |
| 43 | return | 43 | return |
| @@ -47,20 +47,19 @@ def raw_syscalls__sys_enter(event_name, context, common_cpu, | |||
| 47 | syscalls[id] = 1 | 47 | syscalls[id] = 1 |
| 48 | 48 | ||
| 49 | def syscalls__sys_enter(event_name, context, common_cpu, | 49 | def syscalls__sys_enter(event_name, context, common_cpu, |
| 50 | common_secs, common_nsecs, common_pid, common_comm, | 50 | common_secs, common_nsecs, common_pid, common_comm, id, args): |
| 51 | id, args): | ||
| 52 | raw_syscalls__sys_enter(**locals()) | 51 | raw_syscalls__sys_enter(**locals()) |
| 53 | 52 | ||
| 54 | def print_syscall_totals(): | 53 | def print_syscall_totals(): |
| 55 | if for_comm is not None: | 54 | if for_comm is not None: |
| 56 | print("\nsyscall events for %s:\n" % (for_comm)) | 55 | print("\nsyscall events for %s:\n" % (for_comm)) |
| 57 | else: | 56 | else: |
| 58 | print("\nsyscall events:\n") | 57 | print("\nsyscall events:\n") |
| 59 | 58 | ||
| 60 | print("%-40s %10s" % ("event", "count")) | 59 | print("%-40s %10s" % ("event", "count")) |
| 61 | print("%-40s %10s" % ("----------------------------------------", | 60 | print("%-40s %10s" % ("----------------------------------------", |
| 62 | "-----------")) | 61 | "-----------")) |
| 63 | 62 | ||
| 64 | for id, val in sorted(syscalls.items(), key = lambda kv: (kv[1], kv[0]), \ | 63 | for id, val in sorted(syscalls.items(), |
| 65 | reverse = True): | 64 | key = lambda kv: (kv[1], kv[0]), reverse = True): |
| 66 | print("%-40s %10d" % (syscall_name(id), val)) | 65 | print("%-40s %10d" % (syscall_name(id), val)) |
