author     Dmitry Torokhov <dmitry.torokhov@gmail.com>    2017-11-13 20:10:13 -0500
committer  Dmitry Torokhov <dmitry.torokhov@gmail.com>    2017-11-13 20:10:13 -0500
commit     c25141062a82ae8bddced1b3ce2b57a1c0efabe0 (patch)
tree       105edf10059bc0c4f2f00338b0c861b813d1bb1a /tools/perf/scripts/python
parent     26dd633e437dca218547ccbeacc71fe8a620b6f6 (diff)
parent     c1b433e04ef9c0a1c4d65bfe918472ffa334dff4 (diff)
Merge branch 'next' into for-linus
Prepare input updates for 4.15 merge window.
Diffstat (limited to 'tools/perf/scripts/python')
-rwxr-xr-x  tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/EventClass.py  |   1
-rw-r--r--  tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/Util.py        |   1
-rw-r--r--  tools/perf/scripts/python/bin/export-to-sqlite-record                   |   8
-rw-r--r--  tools/perf/scripts/python/bin/export-to-sqlite-report                   |  29
-rw-r--r--  tools/perf/scripts/python/bin/intel-pt-events-record                    |  13
-rw-r--r--  tools/perf/scripts/python/bin/intel-pt-events-report                    |   3
-rw-r--r--  tools/perf/scripts/python/call-graph-from-sql.py (renamed from tools/perf/scripts/python/call-graph-from-postgresql.py)  |  70
-rw-r--r--  tools/perf/scripts/python/event_analyzing_sample.py                     |   1
-rw-r--r--  tools/perf/scripts/python/export-to-postgresql.py                       |   5
-rw-r--r--  tools/perf/scripts/python/export-to-sqlite.py                           | 451
-rw-r--r--  tools/perf/scripts/python/intel-pt-events.py                            | 128
-rwxr-xr-x  tools/perf/scripts/python/net_dropmonitor.py                            |   1
-rw-r--r--  tools/perf/scripts/python/netdev-times.py                               |   1
-rwxr-xr-x  tools/perf/scripts/python/stackcollapse.py                              |   1
-rw-r--r--  tools/perf/scripts/python/stat-cpi.py                                   |   1
15 files changed, 683 insertions, 31 deletions
diff --git a/tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/EventClass.py b/tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/EventClass.py
index 9e0985794e20..81a56cd2b3c1 100755
--- a/tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/EventClass.py
+++ b/tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/EventClass.py
@@ -1,4 +1,5 @@
 # EventClass.py
+# SPDX-License-Identifier: GPL-2.0
 #
 # This is a library defining some events types classes, which could
 # be used by other scripts to analyzing the perf samples.
diff --git a/tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/Util.py b/tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/Util.py
index 1d95009592eb..f6c84966e4f8 100644
--- a/tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/Util.py
+++ b/tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/Util.py
@@ -57,6 +57,7 @@ try:
 	'ia64'		: audit.MACH_IA64,
 	'ppc'		: audit.MACH_PPC,
 	'ppc64'		: audit.MACH_PPC64,
+	'ppc64le'	: audit.MACH_PPC64LE,
 	's390'		: audit.MACH_S390,
 	's390x'		: audit.MACH_S390X,
 	'i386'		: audit.MACH_X86,
diff --git a/tools/perf/scripts/python/bin/export-to-sqlite-record b/tools/perf/scripts/python/bin/export-to-sqlite-record
new file mode 100644
index 000000000000..070204fd6d00
--- /dev/null
+++ b/tools/perf/scripts/python/bin/export-to-sqlite-record
@@ -0,0 +1,8 @@
+#!/bin/bash
+
+#
+# export perf data to a sqlite3 database. Can cover
+# perf ip samples (excluding the tracepoints). No special
+# record requirements, just record what you want to export.
+#
+perf record $@
diff --git a/tools/perf/scripts/python/bin/export-to-sqlite-report b/tools/perf/scripts/python/bin/export-to-sqlite-report
new file mode 100644
index 000000000000..5ff6033e70ba
--- /dev/null
+++ b/tools/perf/scripts/python/bin/export-to-sqlite-report
@@ -0,0 +1,29 @@
+#!/bin/bash
+# description: export perf data to a sqlite3 database
+# args: [database name] [columns] [calls]
+n_args=0
+for i in "$@"
+do
+	if expr match "$i" "-" > /dev/null ; then
+		break
+	fi
+	n_args=$(( $n_args + 1 ))
+done
+if [ "$n_args" -gt 3 ] ; then
+	echo "usage: export-to-sqlite-report [database name] [columns] [calls]"
+	exit
+fi
+if [ "$n_args" -gt 2 ] ; then
+	dbname=$1
+	columns=$2
+	calls=$3
+	shift 3
+elif [ "$n_args" -gt 1 ] ; then
+	dbname=$1
+	columns=$2
+	shift 2
+elif [ "$n_args" -gt 0 ] ; then
+	dbname=$1
+	shift
+fi
+perf script $@ -s "$PERF_EXEC_PATH"/scripts/python/export-to-sqlite.py $dbname $columns $calls
diff --git a/tools/perf/scripts/python/bin/intel-pt-events-record b/tools/perf/scripts/python/bin/intel-pt-events-record
new file mode 100644
index 000000000000..10fe2b6977d4
--- /dev/null
+++ b/tools/perf/scripts/python/bin/intel-pt-events-record
@@ -0,0 +1,13 @@
+#!/bin/bash
+
+#
+# print Intel PT Power Events and PTWRITE. The intel_pt PMU event needs
+# to be specified with appropriate config terms.
+#
+if ! echo "$@" | grep -q intel_pt ; then
+	echo "Options must include the Intel PT event e.g. -e intel_pt/pwr_evt,ptw/"
+	echo "and for power events it probably needs to be system wide i.e. -a option"
+	echo "For example: -a -e intel_pt/pwr_evt,branch=0/ sleep 1"
+	exit 1
+fi
+perf record $@
diff --git a/tools/perf/scripts/python/bin/intel-pt-events-report b/tools/perf/scripts/python/bin/intel-pt-events-report
new file mode 100644
index 000000000000..9a9c92fcd026
--- /dev/null
+++ b/tools/perf/scripts/python/bin/intel-pt-events-report
@@ -0,0 +1,3 @@
+#!/bin/bash
+# description: print Intel PT Power Events and PTWRITE
+perf script $@ -s "$PERF_EXEC_PATH"/scripts/python/intel-pt-events.py
\ No newline at end of file
diff --git a/tools/perf/scripts/python/call-graph-from-postgresql.py b/tools/perf/scripts/python/call-graph-from-sql.py
index e78fdc2a5a9d..b494a67a1c67 100644
--- a/tools/perf/scripts/python/call-graph-from-postgresql.py
+++ b/tools/perf/scripts/python/call-graph-from-sql.py
@@ -1,6 +1,6 @@
 #!/usr/bin/python2
-# call-graph-from-postgresql.py: create call-graph from postgresql database
-# Copyright (c) 2014, Intel Corporation.
+# call-graph-from-sql.py: create call-graph from sql database
+# Copyright (c) 2014-2017, Intel Corporation.
 #
 # This program is free software; you can redistribute it and/or modify it
 # under the terms and conditions of the GNU General Public License,
@@ -11,18 +11,19 @@
 # FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 # more details.
 
-# To use this script you will need to have exported data using the
-# export-to-postgresql.py script. Refer to that script for details.
+# To use this script you will need to have exported data using either the
+# export-to-sqlite.py or the export-to-postgresql.py script. Refer to those
+# scripts for details.
 #
-# Following on from the example in the export-to-postgresql.py script, a
+# Following on from the example in the export scripts, a
 # call-graph can be displayed for the pt_example database like this:
 #
-#	python tools/perf/scripts/python/call-graph-from-postgresql.py pt_example
+#	python tools/perf/scripts/python/call-graph-from-sql.py pt_example
 #
-# Note this script supports connecting to remote databases by setting hostname,
-# port, username, password, and dbname e.g.
+# Note that for PostgreSQL, this script supports connecting to remote databases
+# by setting hostname, port, username, password, and dbname e.g.
 #
-#	python tools/perf/scripts/python/call-graph-from-postgresql.py "hostname=myhost username=myuser password=mypassword dbname=pt_example"
+#	python tools/perf/scripts/python/call-graph-from-sql.py "hostname=myhost username=myuser password=mypassword dbname=pt_example"
 #
 # The result is a GUI window with a tree representing a context-sensitive
 # call-graph. Expanding a couple of levels of the tree and adjusting column
@@ -160,7 +161,7 @@ class TreeItem():
 			'( SELECT short_name FROM dsos WHERE id = ( SELECT dso_id FROM symbols WHERE id = ( SELECT symbol_id FROM call_paths WHERE id = call_path_id ) ) ), '
 			'( SELECT ip FROM call_paths where id = call_path_id ) '
 			'FROM calls WHERE parent_call_path_id = ' + str(self.call_path_id) + ' AND comm_id = ' + str(self.comm_id) + ' AND thread_id = ' + str(self.thread_id) +
-			'ORDER BY call_path_id')
+			' ORDER BY call_path_id')
 		if not ret:
 			raise Exception("Query failed: " + query.lastError().text())
 		last_call_path_id = 0
@@ -291,29 +292,40 @@ class MainWindow(QMainWindow):
 
 if __name__ == '__main__':
 	if (len(sys.argv) < 2):
-		print >> sys.stderr, "Usage is: call-graph-from-postgresql.py <database name>"
+		print >> sys.stderr, "Usage is: call-graph-from-sql.py <database name>"
 		raise Exception("Too few arguments")
 
 	dbname = sys.argv[1]
 
-	db = QSqlDatabase.addDatabase('QPSQL')
-
-	opts = dbname.split()
-	for opt in opts:
-		if '=' in opt:
-			opt = opt.split('=')
-			if opt[0] == 'hostname':
-				db.setHostName(opt[1])
-			elif opt[0] == 'port':
-				db.setPort(int(opt[1]))
-			elif opt[0] == 'username':
-				db.setUserName(opt[1])
-			elif opt[0] == 'password':
-				db.setPassword(opt[1])
-			elif opt[0] == 'dbname':
-				dbname = opt[1]
-		else:
-			dbname = opt
+	is_sqlite3 = False
+	try:
+		f = open(dbname)
+		if f.read(15) == "SQLite format 3":
+			is_sqlite3 = True
+		f.close()
+	except:
+		pass
+
+	if is_sqlite3:
+		db = QSqlDatabase.addDatabase('QSQLITE')
+	else:
+		db = QSqlDatabase.addDatabase('QPSQL')
+		opts = dbname.split()
+		for opt in opts:
+			if '=' in opt:
+				opt = opt.split('=')
+				if opt[0] == 'hostname':
+					db.setHostName(opt[1])
+				elif opt[0] == 'port':
+					db.setPort(int(opt[1]))
+				elif opt[0] == 'username':
+					db.setUserName(opt[1])
+				elif opt[0] == 'password':
+					db.setPassword(opt[1])
+				elif opt[0] == 'dbname':
+					dbname = opt[1]
+			else:
+				dbname = opt
 
 	db.setDatabaseName(dbname)
 	if not db.open():
diff --git a/tools/perf/scripts/python/event_analyzing_sample.py b/tools/perf/scripts/python/event_analyzing_sample.py
index 163c39fa12d9..4e843b9864ec 100644
--- a/tools/perf/scripts/python/event_analyzing_sample.py
+++ b/tools/perf/scripts/python/event_analyzing_sample.py
@@ -1,4 +1,5 @@
 # event_analyzing_sample.py: general event handler in python
+# SPDX-License-Identifier: GPL-2.0
 #
 # Current perf report is already very powerful with the annotation integrated,
 # and this script is not trying to be as powerful as perf report, but
diff --git a/tools/perf/scripts/python/export-to-postgresql.py b/tools/perf/scripts/python/export-to-postgresql.py
index 7656ff8aa066..efcaf6cac2eb 100644
--- a/tools/perf/scripts/python/export-to-postgresql.py
+++ b/tools/perf/scripts/python/export-to-postgresql.py
@@ -59,7 +59,7 @@ import datetime
 #	pt_example=# \q
 #
 # An example of using the database is provided by the script
-# call-graph-from-postgresql.py. Refer to that script for details.
+# call-graph-from-sql.py. Refer to that script for details.
 #
 # Tables:
 #
@@ -340,7 +340,8 @@ if branches:
 		'to_sym_offset bigint,'
 		'to_ip bigint,'
 		'branch_type integer,'
-		'in_tx boolean)')
+		'in_tx boolean,'
+		'call_path_id bigint)')
 else:
 	do_query(query, 'CREATE TABLE samples ('
 		'id bigint NOT NULL,'
diff --git a/tools/perf/scripts/python/export-to-sqlite.py b/tools/perf/scripts/python/export-to-sqlite.py
new file mode 100644
index 000000000000..f827bf77e9d2
--- /dev/null
+++ b/tools/perf/scripts/python/export-to-sqlite.py
@@ -0,0 +1,451 @@
+# export-to-sqlite.py: export perf data to a sqlite3 database
+# Copyright (c) 2017, Intel Corporation.
+#
+# This program is free software; you can redistribute it and/or modify it
+# under the terms and conditions of the GNU General Public License,
+# version 2, as published by the Free Software Foundation.
+#
+# This program is distributed in the hope it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+# more details.
+
+import os
+import sys
+import struct
+import datetime
+
+# To use this script you will need to have installed package python-pyside which
+# provides LGPL-licensed Python bindings for Qt. You will also need the package
+# libqt4-sql-sqlite for Qt sqlite3 support.
+#
+# An example of using this script with Intel PT:
+#
+#	$ perf record -e intel_pt//u ls
+#	$ perf script -s ~/libexec/perf-core/scripts/python/export-to-sqlite.py pt_example branches calls
+#	2017-07-31 14:26:07.326913 Creating database...
+#	2017-07-31 14:26:07.538097 Writing records...
+#	2017-07-31 14:26:09.889292 Adding indexes
+#	2017-07-31 14:26:09.958746 Done
+#
+# To browse the database, sqlite3 can be used e.g.
+#
+#	$ sqlite3 pt_example
+#	sqlite> .header on
+#	sqlite> select * from samples_view where id < 10;
+#	sqlite> .mode column
+#	sqlite> select * from samples_view where id < 10;
+#	sqlite> .tables
+#	sqlite> .schema samples_view
+#	sqlite> .quit
+#
+# An example of using the database is provided by the script
+# call-graph-from-sql.py. Refer to that script for details.
+#
+# The database structure is practically the same as created by the script
+# export-to-postgresql.py. Refer to that script for details. A notable
+# difference is the 'transaction' column of the 'samples' table which is
+# renamed 'transaction_' in sqlite because 'transaction' is a reserved word.
+
+from PySide.QtSql import *
+
+sys.path.append(os.environ['PERF_EXEC_PATH'] + \
+	'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
+
+# These perf imports are not used at present
+#from perf_trace_context import *
+#from Core import *
+
+perf_db_export_mode = True
+perf_db_export_calls = False
+perf_db_export_callchains = False
+
+def usage():
+	print >> sys.stderr, "Usage is: export-to-sqlite.py <database name> [<columns>] [<calls>] [<callchains>]"
+	print >> sys.stderr, "where:	columns		'all' or 'branches'"
+	print >> sys.stderr, "		calls		'calls' => create calls and call_paths table"
+	print >> sys.stderr, "		callchains	'callchains' => create call_paths table"
+	raise Exception("Too few arguments")
+
+if (len(sys.argv) < 2):
+	usage()
+
+dbname = sys.argv[1]
+
+if (len(sys.argv) >= 3):
+	columns = sys.argv[2]
+else:
+	columns = "all"
+
+if columns not in ("all", "branches"):
+	usage()
+
+branches = (columns == "branches")
+
+for i in range(3,len(sys.argv)):
+	if (sys.argv[i] == "calls"):
+		perf_db_export_calls = True
+	elif (sys.argv[i] == "callchains"):
+		perf_db_export_callchains = True
+	else:
+		usage()
+
+def do_query(q, s):
+	if (q.exec_(s)):
+		return
+	raise Exception("Query failed: " + q.lastError().text())
+
+def do_query_(q):
+	if (q.exec_()):
+		return
+	raise Exception("Query failed: " + q.lastError().text())
+
+print datetime.datetime.today(), "Creating database..."
+
+db_exists = False
+try:
+	f = open(dbname)
+	f.close()
+	db_exists = True
+except:
+	pass
+
+if db_exists:
+	raise Exception(dbname + " already exists")
+
+db = QSqlDatabase.addDatabase('QSQLITE')
+db.setDatabaseName(dbname)
+db.open()
+
+query = QSqlQuery(db)
+
+do_query(query, 'PRAGMA journal_mode = OFF')
+do_query(query, 'BEGIN TRANSACTION')
+
+do_query(query, 'CREATE TABLE selected_events ('
+		'id integer NOT NULL PRIMARY KEY,'
+		'name varchar(80))')
+do_query(query, 'CREATE TABLE machines ('
+		'id integer NOT NULL PRIMARY KEY,'
+		'pid integer,'
+		'root_dir varchar(4096))')
+do_query(query, 'CREATE TABLE threads ('
+		'id integer NOT NULL PRIMARY KEY,'
+		'machine_id bigint,'
+		'process_id bigint,'
+		'pid integer,'
+		'tid integer)')
+do_query(query, 'CREATE TABLE comms ('
+		'id integer NOT NULL PRIMARY KEY,'
+		'comm varchar(16))')
+do_query(query, 'CREATE TABLE comm_threads ('
+		'id integer NOT NULL PRIMARY KEY,'
+		'comm_id bigint,'
+		'thread_id bigint)')
+do_query(query, 'CREATE TABLE dsos ('
+		'id integer NOT NULL PRIMARY KEY,'
+		'machine_id bigint,'
+		'short_name varchar(256),'
+		'long_name varchar(4096),'
+		'build_id varchar(64))')
+do_query(query, 'CREATE TABLE symbols ('
+		'id integer NOT NULL PRIMARY KEY,'
+		'dso_id bigint,'
+		'sym_start bigint,'
+		'sym_end bigint,'
+		'binding integer,'
+		'name varchar(2048))')
+do_query(query, 'CREATE TABLE branch_types ('
+		'id integer NOT NULL PRIMARY KEY,'
+		'name varchar(80))')
+
+if branches:
+	do_query(query, 'CREATE TABLE samples ('
+		'id integer NOT NULL PRIMARY KEY,'
+		'evsel_id bigint,'
+		'machine_id bigint,'
+		'thread_id bigint,'
+		'comm_id bigint,'
+		'dso_id bigint,'
+		'symbol_id bigint,'
+		'sym_offset bigint,'
+		'ip bigint,'
+		'time bigint,'
+		'cpu integer,'
+		'to_dso_id bigint,'
+		'to_symbol_id bigint,'
+		'to_sym_offset bigint,'
+		'to_ip bigint,'
+		'branch_type integer,'
+		'in_tx boolean,'
+		'call_path_id bigint)')
+else:
+	do_query(query, 'CREATE TABLE samples ('
+		'id integer NOT NULL PRIMARY KEY,'
+		'evsel_id bigint,'
+		'machine_id bigint,'
+		'thread_id bigint,'
+		'comm_id bigint,'
+		'dso_id bigint,'
+		'symbol_id bigint,'
+		'sym_offset bigint,'
+		'ip bigint,'
+		'time bigint,'
+		'cpu integer,'
+		'to_dso_id bigint,'
+		'to_symbol_id bigint,'
+		'to_sym_offset bigint,'
+		'to_ip bigint,'
+		'period bigint,'
+		'weight bigint,'
+		'transaction_ bigint,'
+		'data_src bigint,'
+		'branch_type integer,'
+		'in_tx boolean,'
+		'call_path_id bigint)')
+
+if perf_db_export_calls or perf_db_export_callchains:
+	do_query(query, 'CREATE TABLE call_paths ('
+		'id integer NOT NULL PRIMARY KEY,'
+		'parent_id bigint,'
+		'symbol_id bigint,'
+		'ip bigint)')
+if perf_db_export_calls:
+	do_query(query, 'CREATE TABLE calls ('
+		'id integer NOT NULL PRIMARY KEY,'
+		'thread_id bigint,'
+		'comm_id bigint,'
+		'call_path_id bigint,'
+		'call_time bigint,'
+		'return_time bigint,'
+		'branch_count bigint,'
+		'call_id bigint,'
+		'return_id bigint,'
+		'parent_call_path_id bigint,'
+		'flags integer)')
+
+# printf was added to sqlite in version 3.8.3
+sqlite_has_printf = False
+try:
+	do_query(query, 'SELECT printf("") FROM machines')
+	sqlite_has_printf = True
+except:
+	pass
+
+def emit_to_hex(x):
+	if sqlite_has_printf:
+		return 'printf("%x", ' + x + ')'
+	else:
+		return x
+
+do_query(query, 'CREATE VIEW machines_view AS '
+	'SELECT '
+		'id,'
+		'pid,'
+		'root_dir,'
+		'CASE WHEN id=0 THEN \'unknown\' WHEN pid=-1 THEN \'host\' ELSE \'guest\' END AS host_or_guest'
+	' FROM machines')
+
+do_query(query, 'CREATE VIEW dsos_view AS '
+	'SELECT '
+		'id,'
+		'machine_id,'
+		'(SELECT host_or_guest FROM machines_view WHERE id = machine_id) AS host_or_guest,'
+		'short_name,'
+		'long_name,'
+		'build_id'
+	' FROM dsos')
+
+do_query(query, 'CREATE VIEW symbols_view AS '
+	'SELECT '
+		'id,'
+		'name,'
+		'(SELECT short_name FROM dsos WHERE id=dso_id) AS dso,'
+		'dso_id,'
+		'sym_start,'
+		'sym_end,'
+		'CASE WHEN binding=0 THEN \'local\' WHEN binding=1 THEN \'global\' ELSE \'weak\' END AS binding'
+	' FROM symbols')
+
+do_query(query, 'CREATE VIEW threads_view AS '
+	'SELECT '
+		'id,'
+		'machine_id,'
+		'(SELECT host_or_guest FROM machines_view WHERE id = machine_id) AS host_or_guest,'
+		'process_id,'
+		'pid,'
+		'tid'
+	' FROM threads')
+
+do_query(query, 'CREATE VIEW comm_threads_view AS '
+	'SELECT '
+		'comm_id,'
+		'(SELECT comm FROM comms WHERE id = comm_id) AS command,'
+		'thread_id,'
+		'(SELECT pid FROM threads WHERE id = thread_id) AS pid,'
+		'(SELECT tid FROM threads WHERE id = thread_id) AS tid'
+	' FROM comm_threads')
+
+if perf_db_export_calls or perf_db_export_callchains:
+	do_query(query, 'CREATE VIEW call_paths_view AS '
+		'SELECT '
+			'c.id,'
+			+ emit_to_hex('c.ip') + ' AS ip,'
+			'c.symbol_id,'
+			'(SELECT name FROM symbols WHERE id = c.symbol_id) AS symbol,'
+			'(SELECT dso_id FROM symbols WHERE id = c.symbol_id) AS dso_id,'
+			'(SELECT dso FROM symbols_view  WHERE id = c.symbol_id) AS dso_short_name,'
+			'c.parent_id,'
+			+ emit_to_hex('p.ip') + ' AS parent_ip,'
+			'p.symbol_id AS parent_symbol_id,'
+			'(SELECT name FROM symbols WHERE id = p.symbol_id) AS parent_symbol,'
+			'(SELECT dso_id FROM symbols WHERE id = p.symbol_id) AS parent_dso_id,'
+			'(SELECT dso FROM symbols_view  WHERE id = p.symbol_id) AS parent_dso_short_name'
+		' FROM call_paths c INNER JOIN call_paths p ON p.id = c.parent_id')
+if perf_db_export_calls:
+	do_query(query, 'CREATE VIEW calls_view AS '
+		'SELECT '
+			'calls.id,'
+			'thread_id,'
+			'(SELECT pid FROM threads WHERE id = thread_id) AS pid,'
+			'(SELECT tid FROM threads WHERE id = thread_id) AS tid,'
+			'(SELECT comm FROM comms WHERE id = comm_id) AS command,'
+			'call_path_id,'
+			+ emit_to_hex('ip') + ' AS ip,'
+			'symbol_id,'
+			'(SELECT name FROM symbols WHERE id = symbol_id) AS symbol,'
+			'call_time,'
+			'return_time,'
+			'return_time - call_time AS elapsed_time,'
+			'branch_count,'
+			'call_id,'
+			'return_id,'
+			'CASE WHEN flags=1 THEN \'no call\' WHEN flags=2 THEN \'no return\' WHEN flags=3 THEN \'no call/return\' ELSE \'\' END AS flags,'
+			'parent_call_path_id'
+		' FROM calls INNER JOIN call_paths ON call_paths.id = call_path_id')
+
+do_query(query, 'CREATE VIEW samples_view AS '
+	'SELECT '
+		'id,'
+		'time,'
+		'cpu,'
+		'(SELECT pid FROM threads WHERE id = thread_id) AS pid,'
+		'(SELECT tid FROM threads WHERE id = thread_id) AS tid,'
+		'(SELECT comm FROM comms WHERE id = comm_id) AS command,'
+		'(SELECT name FROM selected_events WHERE id = evsel_id) AS event,'
+		+ emit_to_hex('ip') + ' AS ip_hex,'
+		'(SELECT name FROM symbols WHERE id = symbol_id) AS symbol,'
+		'sym_offset,'
+		'(SELECT short_name FROM dsos WHERE id = dso_id) AS dso_short_name,'
+		+ emit_to_hex('to_ip') + ' AS to_ip_hex,'
+		'(SELECT name FROM symbols WHERE id = to_symbol_id) AS to_symbol,'
+		'to_sym_offset,'
+		'(SELECT short_name FROM dsos WHERE id = to_dso_id) AS to_dso_short_name,'
+		'(SELECT name FROM branch_types WHERE id = branch_type) AS branch_type_name,'
+		'in_tx'
+	' FROM samples')
+
+do_query(query, 'END TRANSACTION')
+
+evsel_query = QSqlQuery(db)
+evsel_query.prepare("INSERT INTO selected_events VALUES (?, ?)")
+machine_query = QSqlQuery(db)
+machine_query.prepare("INSERT INTO machines VALUES (?, ?, ?)")
+thread_query = QSqlQuery(db)
+thread_query.prepare("INSERT INTO threads VALUES (?, ?, ?, ?, ?)")
+comm_query = QSqlQuery(db)
+comm_query.prepare("INSERT INTO comms VALUES (?, ?)")
+comm_thread_query = QSqlQuery(db)
+comm_thread_query.prepare("INSERT INTO comm_threads VALUES (?, ?, ?)")
+dso_query = QSqlQuery(db)
+dso_query.prepare("INSERT INTO dsos VALUES (?, ?, ?, ?, ?)")
+symbol_query = QSqlQuery(db)
+symbol_query.prepare("INSERT INTO symbols VALUES (?, ?, ?, ?, ?, ?)")
+branch_type_query = QSqlQuery(db)
+branch_type_query.prepare("INSERT INTO branch_types VALUES (?, ?)")
+sample_query = QSqlQuery(db)
+if branches:
+	sample_query.prepare("INSERT INTO samples VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)")
+else:
+	sample_query.prepare("INSERT INTO samples VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)")
+if perf_db_export_calls or perf_db_export_callchains:
+	call_path_query = QSqlQuery(db)
+	call_path_query.prepare("INSERT INTO call_paths VALUES (?, ?, ?, ?)")
+if perf_db_export_calls:
+	call_query = QSqlQuery(db)
+	call_query.prepare("INSERT INTO calls VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)")
+
+def trace_begin():
+	print datetime.datetime.today(), "Writing records..."
+	do_query(query, 'BEGIN TRANSACTION')
+	# id == 0 means unknown. It is easier to create records for them than replace the zeroes with NULLs
+	evsel_table(0, "unknown")
+	machine_table(0, 0, "unknown")
+	thread_table(0, 0, 0, -1, -1)
+	comm_table(0, "unknown")
+	dso_table(0, 0, "unknown", "unknown", "")
+	symbol_table(0, 0, 0, 0, 0, "unknown")
+	sample_table(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)
+	if perf_db_export_calls or perf_db_export_callchains:
+		call_path_table(0, 0, 0, 0)
+
+unhandled_count = 0
+
+def trace_end():
+	do_query(query, 'END TRANSACTION')
+
+	print datetime.datetime.today(), "Adding indexes"
+	if perf_db_export_calls:
+		do_query(query, 'CREATE INDEX pcpid_idx ON calls (parent_call_path_id)')
+
+	if (unhandled_count):
+		print datetime.datetime.today(), "Warning: ", unhandled_count, " unhandled events"
+	print datetime.datetime.today(), "Done"
+
+def trace_unhandled(event_name, context, event_fields_dict):
+	global unhandled_count
+	unhandled_count += 1
+
+def sched__sched_switch(*x):
+	pass
+
+def bind_exec(q, n, x):
+	for xx in x[0:n]:
+		q.addBindValue(str(xx))
+	do_query_(q)
+
+def evsel_table(*x):
+	bind_exec(evsel_query, 2, x)
+
+def machine_table(*x):
+	bind_exec(machine_query, 3, x)
+
+def thread_table(*x):
+	bind_exec(thread_query, 5, x)
+
+def comm_table(*x):
+	bind_exec(comm_query, 2, x)
+
+def comm_thread_table(*x):
+	bind_exec(comm_thread_query, 3, x)
+
+def dso_table(*x):
+	bind_exec(dso_query, 5, x)
+
+def symbol_table(*x):
+	bind_exec(symbol_query, 6, x)
+
+def branch_type_table(*x):
+	bind_exec(branch_type_query, 2, x)
+
+def sample_table(*x):
+	if branches:
+		bind_exec(sample_query, 18, x)
+	else:
+		bind_exec(sample_query, 22, x)
+
+def call_path_table(*x):
+	bind_exec(call_path_query, 4, x)
+
+def call_return_table(*x):
+	bind_exec(call_query, 11, x)
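
The header comment of the new export-to-sqlite.py above shows how to browse the resulting database with the sqlite3 shell; the same can be done from Python with the standard sqlite3 module. A minimal sketch, assuming a pt_example database has already been created as in that example (the database name is only illustrative):

    import sqlite3

    # Open the database written by export-to-sqlite.py and read a few rows
    # from samples_view, one of the views the script creates.
    con = sqlite3.connect("pt_example")
    con.row_factory = sqlite3.Row

    for row in con.execute("SELECT id, command, event, symbol FROM samples_view LIMIT 10"):
        print("%d %s %s %s" % (row["id"], row["command"], row["event"], row["symbol"]))

    con.close()
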
diff --git a/tools/perf/scripts/python/intel-pt-events.py b/tools/perf/scripts/python/intel-pt-events.py
new file mode 100644
index 000000000000..b19172d673af
--- /dev/null
+++ b/tools/perf/scripts/python/intel-pt-events.py
@@ -0,0 +1,128 @@
+# intel-pt-events.py: Print Intel PT Power Events and PTWRITE
+# Copyright (c) 2017, Intel Corporation.
+#
+# This program is free software; you can redistribute it and/or modify it
+# under the terms and conditions of the GNU General Public License,
+# version 2, as published by the Free Software Foundation.
+#
+# This program is distributed in the hope it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+# more details.
+
+import os
+import sys
+import struct
+
+sys.path.append(os.environ['PERF_EXEC_PATH'] + \
+	'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
+
+# These perf imports are not used at present
+#from perf_trace_context import *
+#from Core import *
+
+def trace_begin():
+	print "Intel PT Power Events and PTWRITE"
+
+def trace_end():
+	print "End"
+
+def trace_unhandled(event_name, context, event_fields_dict):
+	print ' '.join(['%s=%s'%(k,str(v))for k,v in sorted(event_fields_dict.items())])
+
+def print_ptwrite(raw_buf):
+	data = struct.unpack_from("<IQ", raw_buf)
+	flags = data[0]
+	payload = data[1]
+	exact_ip = flags & 1
+	print "IP: %u payload: %#x" % (exact_ip, payload),
+
+def print_cbr(raw_buf):
+	data = struct.unpack_from("<BBBBII", raw_buf)
+	cbr = data[0]
+	f = (data[4] + 500) / 1000
+	p = ((cbr * 1000 / data[2]) + 5) / 10
+	print "%3u  freq: %4u MHz  (%3u%%)" % (cbr, f, p),
+
+def print_mwait(raw_buf):
+	data = struct.unpack_from("<IQ", raw_buf)
+	payload = data[1]
+	hints = payload & 0xff
+	extensions = (payload >> 32) & 0x3
+	print "hints: %#x extensions: %#x" % (hints, extensions),
+
+def print_pwre(raw_buf):
+	data = struct.unpack_from("<IQ", raw_buf)
+	payload = data[1]
+	hw = (payload >> 7) & 1
+	cstate = (payload >> 12) & 0xf
+	subcstate = (payload >> 8) & 0xf
+	print "hw: %u cstate: %u sub-cstate: %u" % (hw, cstate, subcstate),
+
+def print_exstop(raw_buf):
+	data = struct.unpack_from("<I", raw_buf)
+	flags = data[0]
+	exact_ip = flags & 1
+	print "IP: %u" % (exact_ip),
+
+def print_pwrx(raw_buf):
+	data = struct.unpack_from("<IQ", raw_buf)
+	payload = data[1]
+	deepest_cstate = payload & 0xf
+	last_cstate = (payload >> 4) & 0xf
+	wake_reason = (payload >> 8) & 0xf
+	print "deepest cstate: %u last cstate: %u wake reason: %#x" % (deepest_cstate, last_cstate, wake_reason),
+
+def print_common_start(comm, sample, name):
+	ts = sample["time"]
+	cpu = sample["cpu"]
+	pid = sample["pid"]
+	tid = sample["tid"]
+	print "%16s %5u/%-5u [%03u] %9u.%09u %7s:" % (comm, pid, tid, cpu, ts / 1000000000, ts %1000000000, name),
+
+def print_common_ip(sample, symbol, dso):
+	ip = sample["ip"]
+	print "%16x %s (%s)" % (ip, symbol, dso)
+
+def process_event(param_dict):
+	event_attr = param_dict["attr"]
+	sample = param_dict["sample"]
+	raw_buf = param_dict["raw_buf"]
+	comm = param_dict["comm"]
+	name = param_dict["ev_name"]
+
+	# Symbol and dso info are not always resolved
+	if (param_dict.has_key("dso")):
+		dso = param_dict["dso"]
+	else:
+		dso = "[unknown]"
+
+	if (param_dict.has_key("symbol")):
+		symbol = param_dict["symbol"]
+	else:
+		symbol = "[unknown]"
+
+	if name == "ptwrite":
+		print_common_start(comm, sample, name)
+		print_ptwrite(raw_buf)
+		print_common_ip(sample, symbol, dso)
+	elif name == "cbr":
+		print_common_start(comm, sample, name)
+		print_cbr(raw_buf)
+		print_common_ip(sample, symbol, dso)
+	elif name == "mwait":
+		print_common_start(comm, sample, name)
+		print_mwait(raw_buf)
+		print_common_ip(sample, symbol, dso)
+	elif name == "pwre":
+		print_common_start(comm, sample, name)
+		print_pwre(raw_buf)
+		print_common_ip(sample, symbol, dso)
+	elif name == "exstop":
+		print_common_start(comm, sample, name)
+		print_exstop(raw_buf)
+		print_common_ip(sample, symbol, dso)
+	elif name == "pwrx":
+		print_common_start(comm, sample, name)
+		print_pwrx(raw_buf)
+		print_common_ip(sample, symbol, dso)
diff --git a/tools/perf/scripts/python/net_dropmonitor.py b/tools/perf/scripts/python/net_dropmonitor.py
index 0b6ce8c253e8..a150164b44a3 100755
--- a/tools/perf/scripts/python/net_dropmonitor.py
+++ b/tools/perf/scripts/python/net_dropmonitor.py
@@ -1,4 +1,5 @@
 # Monitor the system for dropped packets and proudce a report of drop locations and counts
+# SPDX-License-Identifier: GPL-2.0
 
 import os
 import sys
diff --git a/tools/perf/scripts/python/netdev-times.py b/tools/perf/scripts/python/netdev-times.py
index 4c6f09ac7d12..9b2050f778f1 100644
--- a/tools/perf/scripts/python/netdev-times.py
+++ b/tools/perf/scripts/python/netdev-times.py
@@ -1,4 +1,5 @@
 # Display a process of packets and processed time.
+# SPDX-License-Identifier: GPL-2.0
 # It helps us to investigate networking or network device.
 #
 # options
diff --git a/tools/perf/scripts/python/stackcollapse.py b/tools/perf/scripts/python/stackcollapse.py
index 5a605f70ef32..1697b5e18c96 100755
--- a/tools/perf/scripts/python/stackcollapse.py
+++ b/tools/perf/scripts/python/stackcollapse.py
@@ -1,4 +1,5 @@
 # stackcollapse.py - format perf samples with one line per distinct call stack
+# SPDX-License-Identifier: GPL-2.0
 #
 # This script's output has two space-separated fields. The first is a semicolon
 # separated stack including the program name (from the "comm" field) and the
diff --git a/tools/perf/scripts/python/stat-cpi.py b/tools/perf/scripts/python/stat-cpi.py
index 8b60f343dd07..8410672efb8b 100644
--- a/tools/perf/scripts/python/stat-cpi.py
+++ b/tools/perf/scripts/python/stat-cpi.py
@@ -1,4 +1,5 @@
 #!/usr/bin/env python
+# SPDX-License-Identifier: GPL-2.0
 
 data = {}
 times = []