diff options
| author | Takashi Iwai <tiwai@suse.de> | 2019-05-06 10:14:09 -0400 |
|---|---|---|
| committer | Takashi Iwai <tiwai@suse.de> | 2019-05-06 10:14:34 -0400 |
| commit | d81645510ce2a140816c4cb37c45b78d810ca63f (patch) | |
| tree | edd9464900904d22a23da362bb152669480c5d26 /tools/perf/scripts/python | |
| parent | 2854cd34fbab5f28a356d3667c26b7856a7b73e2 (diff) | |
| parent | 378d590c494551a68a824b939c711bb9a280e9ef (diff) | |
Merge tag 'asoc-v5.2' of https://git.kernel.org/pub/scm/linux/kernel/git/broonie/sound into for-linus
ASoC: Updates for v5.2
This is a pretty huge set of changes; it's been a pretty active release
all round, but the big thing with this release is the Sound Open Firmware
changes from Intel, providing another DSP framework for use with the
DSPs in their SoCs. This one works with the firmware of the same name
which is free software (unlike the previous DSP firmwares and framework)
and there has already been some interest in adoption by other systems, so
hopefully we will see adoption by other vendors in the future.
Other highlights include:
- Support for MCLK/sample rate ratio setting in the generic cards.
- Support for pin switches in the generic cards.
- A big set of improvements to the TLV320AIC32x4 drivers from Annaliese
McDermond.
- New drivers for Freescale audio mixers, several Intel machines,
several Mediatek machines, Meson G12A, Sound Open Firmware and
Spreadtrum compressed audio and DMA devices.
Diffstat (limited to 'tools/perf/scripts/python')
| -rw-r--r-- | tools/perf/scripts/python/export-to-postgresql.py | 61 | ||||
| -rw-r--r-- | tools/perf/scripts/python/export-to-sqlite.py | 28 | ||||
| -rwxr-xr-x | tools/perf/scripts/python/exported-sql-viewer.py | 119 |
3 files changed, 153 insertions, 55 deletions
diff --git a/tools/perf/scripts/python/export-to-postgresql.py b/tools/perf/scripts/python/export-to-postgresql.py index 390a351d15ea..c3eae1d77d36 100644 --- a/tools/perf/scripts/python/export-to-postgresql.py +++ b/tools/perf/scripts/python/export-to-postgresql.py | |||
| @@ -10,6 +10,8 @@ | |||
| 10 | # FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | 10 | # FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for |
| 11 | # more details. | 11 | # more details. |
| 12 | 12 | ||
| 13 | from __future__ import print_function | ||
| 14 | |||
| 13 | import os | 15 | import os |
| 14 | import sys | 16 | import sys |
| 15 | import struct | 17 | import struct |
| @@ -199,6 +201,18 @@ import datetime | |||
| 199 | 201 | ||
| 200 | from PySide.QtSql import * | 202 | from PySide.QtSql import * |
| 201 | 203 | ||
| 204 | if sys.version_info < (3, 0): | ||
| 205 | def toserverstr(str): | ||
| 206 | return str | ||
| 207 | def toclientstr(str): | ||
| 208 | return str | ||
| 209 | else: | ||
| 210 | # Assume UTF-8 server_encoding and client_encoding | ||
| 211 | def toserverstr(str): | ||
| 212 | return bytes(str, "UTF_8") | ||
| 213 | def toclientstr(str): | ||
| 214 | return bytes(str, "UTF_8") | ||
| 215 | |||
| 202 | # Need to access PostgreSQL C library directly to use COPY FROM STDIN | 216 | # Need to access PostgreSQL C library directly to use COPY FROM STDIN |
| 203 | from ctypes import * | 217 | from ctypes import * |
| 204 | libpq = CDLL("libpq.so.5") | 218 | libpq = CDLL("libpq.so.5") |
| @@ -234,12 +248,17 @@ perf_db_export_mode = True | |||
| 234 | perf_db_export_calls = False | 248 | perf_db_export_calls = False |
| 235 | perf_db_export_callchains = False | 249 | perf_db_export_callchains = False |
| 236 | 250 | ||
| 251 | def printerr(*args, **kw_args): | ||
| 252 | print(*args, file=sys.stderr, **kw_args) | ||
| 253 | |||
| 254 | def printdate(*args, **kw_args): | ||
| 255 | print(datetime.datetime.today(), *args, sep=' ', **kw_args) | ||
| 237 | 256 | ||
| 238 | def usage(): | 257 | def usage(): |
| 239 | print >> sys.stderr, "Usage is: export-to-postgresql.py <database name> [<columns>] [<calls>] [<callchains>]" | 258 | printerr("Usage is: export-to-postgresql.py <database name> [<columns>] [<calls>] [<callchains>]") |
| 240 | print >> sys.stderr, "where: columns 'all' or 'branches'" | 259 | printerr("where: columns 'all' or 'branches'") |
| 241 | print >> sys.stderr, " calls 'calls' => create calls and call_paths table" | 260 | printerr(" calls 'calls' => create calls and call_paths table") |
| 242 | print >> sys.stderr, " callchains 'callchains' => create call_paths table" | 261 | printerr(" callchains 'callchains' => create call_paths table") |
| 243 | raise Exception("Too few arguments") | 262 | raise Exception("Too few arguments") |
| 244 | 263 | ||
| 245 | if (len(sys.argv) < 2): | 264 | if (len(sys.argv) < 2): |
| @@ -273,7 +292,7 @@ def do_query(q, s): | |||
| 273 | return | 292 | return |
| 274 | raise Exception("Query failed: " + q.lastError().text()) | 293 | raise Exception("Query failed: " + q.lastError().text()) |
| 275 | 294 | ||
| 276 | print datetime.datetime.today(), "Creating database..." | 295 | printdate("Creating database...") |
| 277 | 296 | ||
| 278 | db = QSqlDatabase.addDatabase('QPSQL') | 297 | db = QSqlDatabase.addDatabase('QPSQL') |
| 279 | query = QSqlQuery(db) | 298 | query = QSqlQuery(db) |
| @@ -506,12 +525,12 @@ do_query(query, 'CREATE VIEW samples_view AS ' | |||
| 506 | ' FROM samples') | 525 | ' FROM samples') |
| 507 | 526 | ||
| 508 | 527 | ||
| 509 | file_header = struct.pack("!11sii", "PGCOPY\n\377\r\n\0", 0, 0) | 528 | file_header = struct.pack("!11sii", b"PGCOPY\n\377\r\n\0", 0, 0) |
| 510 | file_trailer = "\377\377" | 529 | file_trailer = b"\377\377" |
| 511 | 530 | ||
| 512 | def open_output_file(file_name): | 531 | def open_output_file(file_name): |
| 513 | path_name = output_dir_name + "/" + file_name | 532 | path_name = output_dir_name + "/" + file_name |
| 514 | file = open(path_name, "w+") | 533 | file = open(path_name, "wb+") |
| 515 | file.write(file_header) | 534 | file.write(file_header) |
| 516 | return file | 535 | return file |
| 517 | 536 | ||
| @@ -526,13 +545,13 @@ def copy_output_file_direct(file, table_name): | |||
| 526 | 545 | ||
| 527 | # Use COPY FROM STDIN because security may prevent postgres from accessing the files directly | 546 | # Use COPY FROM STDIN because security may prevent postgres from accessing the files directly |
| 528 | def copy_output_file(file, table_name): | 547 | def copy_output_file(file, table_name): |
| 529 | conn = PQconnectdb("dbname = " + dbname) | 548 | conn = PQconnectdb(toclientstr("dbname = " + dbname)) |
| 530 | if (PQstatus(conn)): | 549 | if (PQstatus(conn)): |
| 531 | raise Exception("COPY FROM STDIN PQconnectdb failed") | 550 | raise Exception("COPY FROM STDIN PQconnectdb failed") |
| 532 | file.write(file_trailer) | 551 | file.write(file_trailer) |
| 533 | file.seek(0) | 552 | file.seek(0) |
| 534 | sql = "COPY " + table_name + " FROM STDIN (FORMAT 'binary')" | 553 | sql = "COPY " + table_name + " FROM STDIN (FORMAT 'binary')" |
| 535 | res = PQexec(conn, sql) | 554 | res = PQexec(conn, toclientstr(sql)) |
| 536 | if (PQresultStatus(res) != 4): | 555 | if (PQresultStatus(res) != 4): |
| 537 | raise Exception("COPY FROM STDIN PQexec failed") | 556 | raise Exception("COPY FROM STDIN PQexec failed") |
| 538 | data = file.read(65536) | 557 | data = file.read(65536) |
| @@ -566,7 +585,7 @@ if perf_db_export_calls: | |||
| 566 | call_file = open_output_file("call_table.bin") | 585 | call_file = open_output_file("call_table.bin") |
| 567 | 586 | ||
| 568 | def trace_begin(): | 587 | def trace_begin(): |
| 569 | print datetime.datetime.today(), "Writing to intermediate files..." | 588 | printdate("Writing to intermediate files...") |
| 570 | # id == 0 means unknown. It is easier to create records for them than replace the zeroes with NULLs | 589 | # id == 0 means unknown. It is easier to create records for them than replace the zeroes with NULLs |
| 571 | evsel_table(0, "unknown") | 590 | evsel_table(0, "unknown") |
| 572 | machine_table(0, 0, "unknown") | 591 | machine_table(0, 0, "unknown") |
| @@ -582,7 +601,7 @@ def trace_begin(): | |||
| 582 | unhandled_count = 0 | 601 | unhandled_count = 0 |
| 583 | 602 | ||
| 584 | def trace_end(): | 603 | def trace_end(): |
| 585 | print datetime.datetime.today(), "Copying to database..." | 604 | printdate("Copying to database...") |
| 586 | copy_output_file(evsel_file, "selected_events") | 605 | copy_output_file(evsel_file, "selected_events") |
| 587 | copy_output_file(machine_file, "machines") | 606 | copy_output_file(machine_file, "machines") |
| 588 | copy_output_file(thread_file, "threads") | 607 | copy_output_file(thread_file, "threads") |
| @@ -597,7 +616,7 @@ def trace_end(): | |||
| 597 | if perf_db_export_calls: | 616 | if perf_db_export_calls: |
| 598 | copy_output_file(call_file, "calls") | 617 | copy_output_file(call_file, "calls") |
| 599 | 618 | ||
| 600 | print datetime.datetime.today(), "Removing intermediate files..." | 619 | printdate("Removing intermediate files...") |
| 601 | remove_output_file(evsel_file) | 620 | remove_output_file(evsel_file) |
| 602 | remove_output_file(machine_file) | 621 | remove_output_file(machine_file) |
| 603 | remove_output_file(thread_file) | 622 | remove_output_file(thread_file) |
| @@ -612,7 +631,7 @@ def trace_end(): | |||
| 612 | if perf_db_export_calls: | 631 | if perf_db_export_calls: |
| 613 | remove_output_file(call_file) | 632 | remove_output_file(call_file) |
| 614 | os.rmdir(output_dir_name) | 633 | os.rmdir(output_dir_name) |
| 615 | print datetime.datetime.today(), "Adding primary keys" | 634 | printdate("Adding primary keys") |
| 616 | do_query(query, 'ALTER TABLE selected_events ADD PRIMARY KEY (id)') | 635 | do_query(query, 'ALTER TABLE selected_events ADD PRIMARY KEY (id)') |
| 617 | do_query(query, 'ALTER TABLE machines ADD PRIMARY KEY (id)') | 636 | do_query(query, 'ALTER TABLE machines ADD PRIMARY KEY (id)') |
| 618 | do_query(query, 'ALTER TABLE threads ADD PRIMARY KEY (id)') | 637 | do_query(query, 'ALTER TABLE threads ADD PRIMARY KEY (id)') |
| @@ -627,7 +646,7 @@ def trace_end(): | |||
| 627 | if perf_db_export_calls: | 646 | if perf_db_export_calls: |
| 628 | do_query(query, 'ALTER TABLE calls ADD PRIMARY KEY (id)') | 647 | do_query(query, 'ALTER TABLE calls ADD PRIMARY KEY (id)') |
| 629 | 648 | ||
| 630 | print datetime.datetime.today(), "Adding foreign keys" | 649 | printdate("Adding foreign keys") |
| 631 | do_query(query, 'ALTER TABLE threads ' | 650 | do_query(query, 'ALTER TABLE threads ' |
| 632 | 'ADD CONSTRAINT machinefk FOREIGN KEY (machine_id) REFERENCES machines (id),' | 651 | 'ADD CONSTRAINT machinefk FOREIGN KEY (machine_id) REFERENCES machines (id),' |
| 633 | 'ADD CONSTRAINT processfk FOREIGN KEY (process_id) REFERENCES threads (id)') | 652 | 'ADD CONSTRAINT processfk FOREIGN KEY (process_id) REFERENCES threads (id)') |
| @@ -663,8 +682,8 @@ def trace_end(): | |||
| 663 | do_query(query, 'CREATE INDEX pid_idx ON calls (parent_id)') | 682 | do_query(query, 'CREATE INDEX pid_idx ON calls (parent_id)') |
| 664 | 683 | ||
| 665 | if (unhandled_count): | 684 | if (unhandled_count): |
| 666 | print datetime.datetime.today(), "Warning: ", unhandled_count, " unhandled events" | 685 | printdate("Warning: ", unhandled_count, " unhandled events") |
| 667 | print datetime.datetime.today(), "Done" | 686 | printdate("Done") |
| 668 | 687 | ||
| 669 | def trace_unhandled(event_name, context, event_fields_dict): | 688 | def trace_unhandled(event_name, context, event_fields_dict): |
| 670 | global unhandled_count | 689 | global unhandled_count |
| @@ -674,12 +693,14 @@ def sched__sched_switch(*x): | |||
| 674 | pass | 693 | pass |
| 675 | 694 | ||
| 676 | def evsel_table(evsel_id, evsel_name, *x): | 695 | def evsel_table(evsel_id, evsel_name, *x): |
| 696 | evsel_name = toserverstr(evsel_name) | ||
| 677 | n = len(evsel_name) | 697 | n = len(evsel_name) |
| 678 | fmt = "!hiqi" + str(n) + "s" | 698 | fmt = "!hiqi" + str(n) + "s" |
| 679 | value = struct.pack(fmt, 2, 8, evsel_id, n, evsel_name) | 699 | value = struct.pack(fmt, 2, 8, evsel_id, n, evsel_name) |
| 680 | evsel_file.write(value) | 700 | evsel_file.write(value) |
| 681 | 701 | ||
| 682 | def machine_table(machine_id, pid, root_dir, *x): | 702 | def machine_table(machine_id, pid, root_dir, *x): |
| 703 | root_dir = toserverstr(root_dir) | ||
| 683 | n = len(root_dir) | 704 | n = len(root_dir) |
| 684 | fmt = "!hiqiii" + str(n) + "s" | 705 | fmt = "!hiqiii" + str(n) + "s" |
| 685 | value = struct.pack(fmt, 3, 8, machine_id, 4, pid, n, root_dir) | 706 | value = struct.pack(fmt, 3, 8, machine_id, 4, pid, n, root_dir) |
| @@ -690,6 +711,7 @@ def thread_table(thread_id, machine_id, process_id, pid, tid, *x): | |||
| 690 | thread_file.write(value) | 711 | thread_file.write(value) |
| 691 | 712 | ||
| 692 | def comm_table(comm_id, comm_str, *x): | 713 | def comm_table(comm_id, comm_str, *x): |
| 714 | comm_str = toserverstr(comm_str) | ||
| 693 | n = len(comm_str) | 715 | n = len(comm_str) |
| 694 | fmt = "!hiqi" + str(n) + "s" | 716 | fmt = "!hiqi" + str(n) + "s" |
| 695 | value = struct.pack(fmt, 2, 8, comm_id, n, comm_str) | 717 | value = struct.pack(fmt, 2, 8, comm_id, n, comm_str) |
| @@ -701,6 +723,9 @@ def comm_thread_table(comm_thread_id, comm_id, thread_id, *x): | |||
| 701 | comm_thread_file.write(value) | 723 | comm_thread_file.write(value) |
| 702 | 724 | ||
| 703 | def dso_table(dso_id, machine_id, short_name, long_name, build_id, *x): | 725 | def dso_table(dso_id, machine_id, short_name, long_name, build_id, *x): |
| 726 | short_name = toserverstr(short_name) | ||
| 727 | long_name = toserverstr(long_name) | ||
| 728 | build_id = toserverstr(build_id) | ||
| 704 | n1 = len(short_name) | 729 | n1 = len(short_name) |
| 705 | n2 = len(long_name) | 730 | n2 = len(long_name) |
| 706 | n3 = len(build_id) | 731 | n3 = len(build_id) |
| @@ -709,12 +734,14 @@ def dso_table(dso_id, machine_id, short_name, long_name, build_id, *x): | |||
| 709 | dso_file.write(value) | 734 | dso_file.write(value) |
| 710 | 735 | ||
| 711 | def symbol_table(symbol_id, dso_id, sym_start, sym_end, binding, symbol_name, *x): | 736 | def symbol_table(symbol_id, dso_id, sym_start, sym_end, binding, symbol_name, *x): |
| 737 | symbol_name = toserverstr(symbol_name) | ||
| 712 | n = len(symbol_name) | 738 | n = len(symbol_name) |
| 713 | fmt = "!hiqiqiqiqiii" + str(n) + "s" | 739 | fmt = "!hiqiqiqiqiii" + str(n) + "s" |
| 714 | value = struct.pack(fmt, 6, 8, symbol_id, 8, dso_id, 8, sym_start, 8, sym_end, 4, binding, n, symbol_name) | 740 | value = struct.pack(fmt, 6, 8, symbol_id, 8, dso_id, 8, sym_start, 8, sym_end, 4, binding, n, symbol_name) |
| 715 | symbol_file.write(value) | 741 | symbol_file.write(value) |
| 716 | 742 | ||
| 717 | def branch_type_table(branch_type, name, *x): | 743 | def branch_type_table(branch_type, name, *x): |
| 744 | name = toserverstr(name) | ||
| 718 | n = len(name) | 745 | n = len(name) |
| 719 | fmt = "!hiii" + str(n) + "s" | 746 | fmt = "!hiii" + str(n) + "s" |
| 720 | value = struct.pack(fmt, 2, 4, branch_type, n, name) | 747 | value = struct.pack(fmt, 2, 4, branch_type, n, name) |
diff --git a/tools/perf/scripts/python/export-to-sqlite.py b/tools/perf/scripts/python/export-to-sqlite.py index eb63e6c7107f..bf271fbc3a88 100644 --- a/tools/perf/scripts/python/export-to-sqlite.py +++ b/tools/perf/scripts/python/export-to-sqlite.py | |||
| @@ -10,6 +10,8 @@ | |||
| 10 | # FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | 10 | # FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for |
| 11 | # more details. | 11 | # more details. |
| 12 | 12 | ||
| 13 | from __future__ import print_function | ||
| 14 | |||
| 13 | import os | 15 | import os |
| 14 | import sys | 16 | import sys |
| 15 | import struct | 17 | import struct |
| @@ -60,11 +62,17 @@ perf_db_export_mode = True | |||
| 60 | perf_db_export_calls = False | 62 | perf_db_export_calls = False |
| 61 | perf_db_export_callchains = False | 63 | perf_db_export_callchains = False |
| 62 | 64 | ||
| 65 | def printerr(*args, **keyword_args): | ||
| 66 | print(*args, file=sys.stderr, **keyword_args) | ||
| 67 | |||
| 68 | def printdate(*args, **kw_args): | ||
| 69 | print(datetime.datetime.today(), *args, sep=' ', **kw_args) | ||
| 70 | |||
| 63 | def usage(): | 71 | def usage(): |
| 64 | print >> sys.stderr, "Usage is: export-to-sqlite.py <database name> [<columns>] [<calls>] [<callchains>]" | 72 | printerr("Usage is: export-to-sqlite.py <database name> [<columns>] [<calls>] [<callchains>]"); |
| 65 | print >> sys.stderr, "where: columns 'all' or 'branches'" | 73 | printerr("where: columns 'all' or 'branches'"); |
| 66 | print >> sys.stderr, " calls 'calls' => create calls and call_paths table" | 74 | printerr(" calls 'calls' => create calls and call_paths table"); |
| 67 | print >> sys.stderr, " callchains 'callchains' => create call_paths table" | 75 | printerr(" callchains 'callchains' => create call_paths table"); |
| 68 | raise Exception("Too few arguments") | 76 | raise Exception("Too few arguments") |
| 69 | 77 | ||
| 70 | if (len(sys.argv) < 2): | 78 | if (len(sys.argv) < 2): |
| @@ -100,7 +108,7 @@ def do_query_(q): | |||
| 100 | return | 108 | return |
| 101 | raise Exception("Query failed: " + q.lastError().text()) | 109 | raise Exception("Query failed: " + q.lastError().text()) |
| 102 | 110 | ||
| 103 | print datetime.datetime.today(), "Creating database..." | 111 | printdate("Creating database ...") |
| 104 | 112 | ||
| 105 | db_exists = False | 113 | db_exists = False |
| 106 | try: | 114 | try: |
| @@ -323,7 +331,7 @@ if perf_db_export_calls: | |||
| 323 | 'return_id,' | 331 | 'return_id,' |
| 324 | 'CASE WHEN flags=0 THEN \'\' WHEN flags=1 THEN \'no call\' WHEN flags=2 THEN \'no return\' WHEN flags=3 THEN \'no call/return\' WHEN flags=6 THEN \'jump\' ELSE flags END AS flags,' | 332 | 'CASE WHEN flags=0 THEN \'\' WHEN flags=1 THEN \'no call\' WHEN flags=2 THEN \'no return\' WHEN flags=3 THEN \'no call/return\' WHEN flags=6 THEN \'jump\' ELSE flags END AS flags,' |
| 325 | 'parent_call_path_id,' | 333 | 'parent_call_path_id,' |
| 326 | 'parent_id' | 334 | 'calls.parent_id' |
| 327 | ' FROM calls INNER JOIN call_paths ON call_paths.id = call_path_id') | 335 | ' FROM calls INNER JOIN call_paths ON call_paths.id = call_path_id') |
| 328 | 336 | ||
| 329 | do_query(query, 'CREATE VIEW samples_view AS ' | 337 | do_query(query, 'CREATE VIEW samples_view AS ' |
| @@ -378,7 +386,7 @@ if perf_db_export_calls: | |||
| 378 | call_query.prepare("INSERT INTO calls VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)") | 386 | call_query.prepare("INSERT INTO calls VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)") |
| 379 | 387 | ||
| 380 | def trace_begin(): | 388 | def trace_begin(): |
| 381 | print datetime.datetime.today(), "Writing records..." | 389 | printdate("Writing records...") |
| 382 | do_query(query, 'BEGIN TRANSACTION') | 390 | do_query(query, 'BEGIN TRANSACTION') |
| 383 | # id == 0 means unknown. It is easier to create records for them than replace the zeroes with NULLs | 391 | # id == 0 means unknown. It is easier to create records for them than replace the zeroes with NULLs |
| 384 | evsel_table(0, "unknown") | 392 | evsel_table(0, "unknown") |
| @@ -397,14 +405,14 @@ unhandled_count = 0 | |||
| 397 | def trace_end(): | 405 | def trace_end(): |
| 398 | do_query(query, 'END TRANSACTION') | 406 | do_query(query, 'END TRANSACTION') |
| 399 | 407 | ||
| 400 | print datetime.datetime.today(), "Adding indexes" | 408 | printdate("Adding indexes") |
| 401 | if perf_db_export_calls: | 409 | if perf_db_export_calls: |
| 402 | do_query(query, 'CREATE INDEX pcpid_idx ON calls (parent_call_path_id)') | 410 | do_query(query, 'CREATE INDEX pcpid_idx ON calls (parent_call_path_id)') |
| 403 | do_query(query, 'CREATE INDEX pid_idx ON calls (parent_id)') | 411 | do_query(query, 'CREATE INDEX pid_idx ON calls (parent_id)') |
| 404 | 412 | ||
| 405 | if (unhandled_count): | 413 | if (unhandled_count): |
| 406 | print datetime.datetime.today(), "Warning: ", unhandled_count, " unhandled events" | 414 | printdate("Warning: ", unhandled_count, " unhandled events") |
| 407 | print datetime.datetime.today(), "Done" | 415 | printdate("Done") |
| 408 | 416 | ||
| 409 | def trace_unhandled(event_name, context, event_fields_dict): | 417 | def trace_unhandled(event_name, context, event_fields_dict): |
| 410 | global unhandled_count | 418 | global unhandled_count |
diff --git a/tools/perf/scripts/python/exported-sql-viewer.py b/tools/perf/scripts/python/exported-sql-viewer.py index afec9479ca7f..74ef92f1d19a 100755 --- a/tools/perf/scripts/python/exported-sql-viewer.py +++ b/tools/perf/scripts/python/exported-sql-viewer.py | |||
| @@ -88,20 +88,39 @@ | |||
| 88 | # 7fab593ea956 48 89 15 3b 13 22 00 movq %rdx, 0x22133b(%rip) | 88 | # 7fab593ea956 48 89 15 3b 13 22 00 movq %rdx, 0x22133b(%rip) |
| 89 | # 8107675243232 2 ls 22011 22011 hardware interrupt No 7fab593ea956 _dl_start+0x26 (ld-2.19.so) -> ffffffff86a012e0 page_fault ([kernel]) | 89 | # 8107675243232 2 ls 22011 22011 hardware interrupt No 7fab593ea956 _dl_start+0x26 (ld-2.19.so) -> ffffffff86a012e0 page_fault ([kernel]) |
| 90 | 90 | ||
| 91 | from __future__ import print_function | ||
| 92 | |||
| 91 | import sys | 93 | import sys |
| 92 | import weakref | 94 | import weakref |
| 93 | import threading | 95 | import threading |
| 94 | import string | 96 | import string |
| 95 | import cPickle | 97 | try: |
| 98 | # Python2 | ||
| 99 | import cPickle as pickle | ||
| 100 | # size of pickled integer big enough for record size | ||
| 101 | glb_nsz = 8 | ||
| 102 | except ImportError: | ||
| 103 | import pickle | ||
| 104 | glb_nsz = 16 | ||
| 96 | import re | 105 | import re |
| 97 | import os | 106 | import os |
| 98 | from PySide.QtCore import * | 107 | from PySide.QtCore import * |
| 99 | from PySide.QtGui import * | 108 | from PySide.QtGui import * |
| 100 | from PySide.QtSql import * | 109 | from PySide.QtSql import * |
| 110 | pyside_version_1 = True | ||
| 101 | from decimal import * | 111 | from decimal import * |
| 102 | from ctypes import * | 112 | from ctypes import * |
| 103 | from multiprocessing import Process, Array, Value, Event | 113 | from multiprocessing import Process, Array, Value, Event |
| 104 | 114 | ||
| 115 | # xrange is range in Python3 | ||
| 116 | try: | ||
| 117 | xrange | ||
| 118 | except NameError: | ||
| 119 | xrange = range | ||
| 120 | |||
| 121 | def printerr(*args, **keyword_args): | ||
| 122 | print(*args, file=sys.stderr, **keyword_args) | ||
| 123 | |||
| 105 | # Data formatting helpers | 124 | # Data formatting helpers |
| 106 | 125 | ||
| 107 | def tohex(ip): | 126 | def tohex(ip): |
| @@ -1004,10 +1023,6 @@ class ChildDataItemFinder(): | |||
| 1004 | 1023 | ||
| 1005 | glb_chunk_sz = 10000 | 1024 | glb_chunk_sz = 10000 |
| 1006 | 1025 | ||
| 1007 | # size of pickled integer big enough for record size | ||
| 1008 | |||
| 1009 | glb_nsz = 8 | ||
| 1010 | |||
| 1011 | # Background process for SQL data fetcher | 1026 | # Background process for SQL data fetcher |
| 1012 | 1027 | ||
| 1013 | class SQLFetcherProcess(): | 1028 | class SQLFetcherProcess(): |
| @@ -1066,7 +1081,7 @@ class SQLFetcherProcess(): | |||
| 1066 | return True | 1081 | return True |
| 1067 | if space >= glb_nsz: | 1082 | if space >= glb_nsz: |
| 1068 | # Use 0 (or space < glb_nsz) to mean there is no more at the top of the buffer | 1083 | # Use 0 (or space < glb_nsz) to mean there is no more at the top of the buffer |
| 1069 | nd = cPickle.dumps(0, cPickle.HIGHEST_PROTOCOL) | 1084 | nd = pickle.dumps(0, pickle.HIGHEST_PROTOCOL) |
| 1070 | self.buffer[self.local_head : self.local_head + len(nd)] = nd | 1085 | self.buffer[self.local_head : self.local_head + len(nd)] = nd |
| 1071 | self.local_head = 0 | 1086 | self.local_head = 0 |
| 1072 | if self.local_tail - self.local_head > sz: | 1087 | if self.local_tail - self.local_head > sz: |
| @@ -1084,9 +1099,9 @@ class SQLFetcherProcess(): | |||
| 1084 | self.wait_event.wait() | 1099 | self.wait_event.wait() |
| 1085 | 1100 | ||
| 1086 | def AddToBuffer(self, obj): | 1101 | def AddToBuffer(self, obj): |
| 1087 | d = cPickle.dumps(obj, cPickle.HIGHEST_PROTOCOL) | 1102 | d = pickle.dumps(obj, pickle.HIGHEST_PROTOCOL) |
| 1088 | n = len(d) | 1103 | n = len(d) |
| 1089 | nd = cPickle.dumps(n, cPickle.HIGHEST_PROTOCOL) | 1104 | nd = pickle.dumps(n, pickle.HIGHEST_PROTOCOL) |
| 1090 | sz = n + glb_nsz | 1105 | sz = n + glb_nsz |
| 1091 | self.WaitForSpace(sz) | 1106 | self.WaitForSpace(sz) |
| 1092 | pos = self.local_head | 1107 | pos = self.local_head |
| @@ -1198,12 +1213,12 @@ class SQLFetcher(QObject): | |||
| 1198 | pos = self.local_tail | 1213 | pos = self.local_tail |
| 1199 | if len(self.buffer) - pos < glb_nsz: | 1214 | if len(self.buffer) - pos < glb_nsz: |
| 1200 | pos = 0 | 1215 | pos = 0 |
| 1201 | n = cPickle.loads(self.buffer[pos : pos + glb_nsz]) | 1216 | n = pickle.loads(self.buffer[pos : pos + glb_nsz]) |
| 1202 | if n == 0: | 1217 | if n == 0: |
| 1203 | pos = 0 | 1218 | pos = 0 |
| 1204 | n = cPickle.loads(self.buffer[0 : glb_nsz]) | 1219 | n = pickle.loads(self.buffer[0 : glb_nsz]) |
| 1205 | pos += glb_nsz | 1220 | pos += glb_nsz |
| 1206 | obj = cPickle.loads(self.buffer[pos : pos + n]) | 1221 | obj = pickle.loads(self.buffer[pos : pos + n]) |
| 1207 | self.local_tail = pos + n | 1222 | self.local_tail = pos + n |
| 1208 | return obj | 1223 | return obj |
| 1209 | 1224 | ||
| @@ -1512,6 +1527,19 @@ def BranchDataPrep(query): | |||
| 1512 | " (" + dsoname(query.value(15)) + ")") | 1527 | " (" + dsoname(query.value(15)) + ")") |
| 1513 | return data | 1528 | return data |
| 1514 | 1529 | ||
| 1530 | def BranchDataPrepWA(query): | ||
| 1531 | data = [] | ||
| 1532 | data.append(query.value(0)) | ||
| 1533 | # Workaround pyside failing to handle large integers (i.e. time) in python3 by converting to a string | ||
| 1534 | data.append("{:>19}".format(query.value(1))) | ||
| 1535 | for i in xrange(2, 8): | ||
| 1536 | data.append(query.value(i)) | ||
| 1537 | data.append(tohex(query.value(8)).rjust(16) + " " + query.value(9) + offstr(query.value(10)) + | ||
| 1538 | " (" + dsoname(query.value(11)) + ")" + " -> " + | ||
| 1539 | tohex(query.value(12)) + " " + query.value(13) + offstr(query.value(14)) + | ||
| 1540 | " (" + dsoname(query.value(15)) + ")") | ||
| 1541 | return data | ||
| 1542 | |||
| 1515 | # Branch data model | 1543 | # Branch data model |
| 1516 | 1544 | ||
| 1517 | class BranchModel(TreeModel): | 1545 | class BranchModel(TreeModel): |
| @@ -1539,7 +1567,11 @@ class BranchModel(TreeModel): | |||
| 1539 | " AND evsel_id = " + str(self.event_id) + | 1567 | " AND evsel_id = " + str(self.event_id) + |
| 1540 | " ORDER BY samples.id" | 1568 | " ORDER BY samples.id" |
| 1541 | " LIMIT " + str(glb_chunk_sz)) | 1569 | " LIMIT " + str(glb_chunk_sz)) |
| 1542 | self.fetcher = SQLFetcher(glb, sql, BranchDataPrep, self.AddSample) | 1570 | if pyside_version_1 and sys.version_info[0] == 3: |
| 1571 | prep = BranchDataPrepWA | ||
| 1572 | else: | ||
| 1573 | prep = BranchDataPrep | ||
| 1574 | self.fetcher = SQLFetcher(glb, sql, prep, self.AddSample) | ||
| 1543 | self.fetcher.done.connect(self.Update) | 1575 | self.fetcher.done.connect(self.Update) |
| 1544 | self.fetcher.Fetch(glb_chunk_sz) | 1576 | self.fetcher.Fetch(glb_chunk_sz) |
| 1545 | 1577 | ||
| @@ -2065,14 +2097,6 @@ def IsSelectable(db, table, sql = ""): | |||
| 2065 | return False | 2097 | return False |
| 2066 | return True | 2098 | return True |
| 2067 | 2099 | ||
| 2068 | # SQL data preparation | ||
| 2069 | |||
| 2070 | def SQLTableDataPrep(query, count): | ||
| 2071 | data = [] | ||
| 2072 | for i in xrange(count): | ||
| 2073 | data.append(query.value(i)) | ||
| 2074 | return data | ||
| 2075 | |||
| 2076 | # SQL table data model item | 2100 | # SQL table data model item |
| 2077 | 2101 | ||
| 2078 | class SQLTableItem(): | 2102 | class SQLTableItem(): |
| @@ -2096,7 +2120,7 @@ class SQLTableModel(TableModel): | |||
| 2096 | self.more = True | 2120 | self.more = True |
| 2097 | self.populated = 0 | 2121 | self.populated = 0 |
| 2098 | self.column_headers = column_headers | 2122 | self.column_headers = column_headers |
| 2099 | self.fetcher = SQLFetcher(glb, sql, lambda x, y=len(column_headers): SQLTableDataPrep(x, y), self.AddSample) | 2123 | self.fetcher = SQLFetcher(glb, sql, lambda x, y=len(column_headers): self.SQLTableDataPrep(x, y), self.AddSample) |
| 2100 | self.fetcher.done.connect(self.Update) | 2124 | self.fetcher.done.connect(self.Update) |
| 2101 | self.fetcher.Fetch(glb_chunk_sz) | 2125 | self.fetcher.Fetch(glb_chunk_sz) |
| 2102 | 2126 | ||
| @@ -2140,6 +2164,12 @@ class SQLTableModel(TableModel): | |||
| 2140 | def columnHeader(self, column): | 2164 | def columnHeader(self, column): |
| 2141 | return self.column_headers[column] | 2165 | return self.column_headers[column] |
| 2142 | 2166 | ||
| 2167 | def SQLTableDataPrep(self, query, count): | ||
| 2168 | data = [] | ||
| 2169 | for i in xrange(count): | ||
| 2170 | data.append(query.value(i)) | ||
| 2171 | return data | ||
| 2172 | |||
| 2143 | # SQL automatic table data model | 2173 | # SQL automatic table data model |
| 2144 | 2174 | ||
| 2145 | class SQLAutoTableModel(SQLTableModel): | 2175 | class SQLAutoTableModel(SQLTableModel): |
| @@ -2168,8 +2198,32 @@ class SQLAutoTableModel(SQLTableModel): | |||
| 2168 | QueryExec(query, "SELECT column_name FROM information_schema.columns WHERE table_schema = '" + schema + "' and table_name = '" + select_table_name + "'") | 2198 | QueryExec(query, "SELECT column_name FROM information_schema.columns WHERE table_schema = '" + schema + "' and table_name = '" + select_table_name + "'") |
| 2169 | while query.next(): | 2199 | while query.next(): |
| 2170 | column_headers.append(query.value(0)) | 2200 | column_headers.append(query.value(0)) |
| 2201 | if pyside_version_1 and sys.version_info[0] == 3: | ||
| 2202 | if table_name == "samples_view": | ||
| 2203 | self.SQLTableDataPrep = self.samples_view_DataPrep | ||
| 2204 | if table_name == "samples": | ||
| 2205 | self.SQLTableDataPrep = self.samples_DataPrep | ||
| 2171 | super(SQLAutoTableModel, self).__init__(glb, sql, column_headers, parent) | 2206 | super(SQLAutoTableModel, self).__init__(glb, sql, column_headers, parent) |
| 2172 | 2207 | ||
| 2208 | def samples_view_DataPrep(self, query, count): | ||
| 2209 | data = [] | ||
| 2210 | data.append(query.value(0)) | ||
| 2211 | # Workaround pyside failing to handle large integers (i.e. time) in python3 by converting to a string | ||
| 2212 | data.append("{:>19}".format(query.value(1))) | ||
| 2213 | for i in xrange(2, count): | ||
| 2214 | data.append(query.value(i)) | ||
| 2215 | return data | ||
| 2216 | |||
| 2217 | def samples_DataPrep(self, query, count): | ||
| 2218 | data = [] | ||
| 2219 | for i in xrange(9): | ||
| 2220 | data.append(query.value(i)) | ||
| 2221 | # Workaround pyside failing to handle large integers (i.e. time) in python3 by converting to a string | ||
| 2222 | data.append("{:>19}".format(query.value(9))) | ||
| 2223 | for i in xrange(10, count): | ||
| 2224 | data.append(query.value(i)) | ||
| 2225 | return data | ||
| 2226 | |||
| 2173 | # Base class for custom ResizeColumnsToContents | 2227 | # Base class for custom ResizeColumnsToContents |
| 2174 | 2228 | ||
| 2175 | class ResizeColumnsToContentsBase(QObject): | 2229 | class ResizeColumnsToContentsBase(QObject): |
| @@ -2854,9 +2908,13 @@ class LibXED(): | |||
| 2854 | ok = self.xed_format_context(2, inst.xedp, inst.bufferp, sizeof(inst.buffer), ip, 0, 0) | 2908 | ok = self.xed_format_context(2, inst.xedp, inst.bufferp, sizeof(inst.buffer), ip, 0, 0) |
| 2855 | if not ok: | 2909 | if not ok: |
| 2856 | return 0, "" | 2910 | return 0, "" |
| 2911 | if sys.version_info[0] == 2: | ||
| 2912 | result = inst.buffer.value | ||
| 2913 | else: | ||
| 2914 | result = inst.buffer.value.decode() | ||
| 2857 | # Return instruction length and the disassembled instruction text | 2915 | # Return instruction length and the disassembled instruction text |
| 2858 | # For now, assume the length is in byte 166 | 2916 | # For now, assume the length is in byte 166 |
| 2859 | return inst.xedd[166], inst.buffer.value | 2917 | return inst.xedd[166], result |
| 2860 | 2918 | ||
| 2861 | def TryOpen(file_name): | 2919 | def TryOpen(file_name): |
| 2862 | try: | 2920 | try: |
| @@ -2872,9 +2930,14 @@ def Is64Bit(f): | |||
| 2872 | header = f.read(7) | 2930 | header = f.read(7) |
| 2873 | f.seek(pos) | 2931 | f.seek(pos) |
| 2874 | magic = header[0:4] | 2932 | magic = header[0:4] |
| 2875 | eclass = ord(header[4]) | 2933 | if sys.version_info[0] == 2: |
| 2876 | encoding = ord(header[5]) | 2934 | eclass = ord(header[4]) |
| 2877 | version = ord(header[6]) | 2935 | encoding = ord(header[5]) |
| 2936 | version = ord(header[6]) | ||
| 2937 | else: | ||
| 2938 | eclass = header[4] | ||
| 2939 | encoding = header[5] | ||
| 2940 | version = header[6] | ||
| 2878 | if magic == chr(127) + "ELF" and eclass > 0 and eclass < 3 and encoding > 0 and encoding < 3 and version == 1: | 2941 | if magic == chr(127) + "ELF" and eclass > 0 and eclass < 3 and encoding > 0 and encoding < 3 and version == 1: |
| 2879 | result = True if eclass == 2 else False | 2942 | result = True if eclass == 2 else False |
| 2880 | return result | 2943 | return result |
| @@ -2973,7 +3036,7 @@ class DBRef(): | |||
| 2973 | 3036 | ||
| 2974 | def Main(): | 3037 | def Main(): |
| 2975 | if (len(sys.argv) < 2): | 3038 | if (len(sys.argv) < 2): |
| 2976 | print >> sys.stderr, "Usage is: exported-sql-viewer.py {<database name> | --help-only}" | 3039 | printerr("Usage is: exported-sql-viewer.py {<database name> | --help-only}"); |
| 2977 | raise Exception("Too few arguments") | 3040 | raise Exception("Too few arguments") |
| 2978 | 3041 | ||
| 2979 | dbname = sys.argv[1] | 3042 | dbname = sys.argv[1] |
| @@ -2986,8 +3049,8 @@ def Main(): | |||
| 2986 | 3049 | ||
| 2987 | is_sqlite3 = False | 3050 | is_sqlite3 = False |
| 2988 | try: | 3051 | try: |
| 2989 | f = open(dbname) | 3052 | f = open(dbname, "rb") |
| 2990 | if f.read(15) == "SQLite format 3": | 3053 | if f.read(15) == b'SQLite format 3': |
| 2991 | is_sqlite3 = True | 3054 | is_sqlite3 = True |
| 2992 | f.close() | 3055 | f.close() |
| 2993 | except: | 3056 | except: |
