diff options
| author | Jonathan Herman <hermanjl@cs.unc.edu> | 2013-01-17 16:15:55 -0500 |
|---|---|---|
| committer | Jonathan Herman <hermanjl@cs.unc.edu> | 2013-01-17 16:15:55 -0500 |
| commit | 8dea78da5cee153b8af9c07a2745f6c55057fe12 (patch) | |
| tree | a8f4d49d63b1ecc92f2fddceba0655b2472c5bd9 /tools/perf/scripts/python | |
| parent | 406089d01562f1e2bf9f089fd7637009ebaad589 (diff) | |
Patched in Tegra support.
Diffstat (limited to 'tools/perf/scripts/python')
7 files changed, 0 insertions, 372 deletions
diff --git a/tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/EventClass.py b/tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/EventClass.py deleted file mode 100755 index 9e0985794e2..00000000000 --- a/tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/EventClass.py +++ /dev/null | |||
| @@ -1,94 +0,0 @@ | |||
| 1 | # EventClass.py | ||
| 2 | # | ||
| 3 | # This is a library defining some events types classes, which could | ||
| 4 | # be used by other scripts to analyze the perf samples. | ||
| 5 | # | ||
| 6 | # Currently there are just a few classes defined for examples, | ||
| 7 | # PerfEvent is the base class for all perf event sample, PebsEvent | ||
| 8 | # is a HW base Intel x86 PEBS event, and user could add more SW/HW | ||
| 9 | # event classes based on requirements. | ||
| 10 | |||
| 11 | import struct | ||
| 12 | |||
# Event types, user could add more here
EVTYPE_GENERIC = 0
EVTYPE_PEBS = 1 # Basic PEBS event
EVTYPE_PEBS_LL = 2 # PEBS event with load latency info
EVTYPE_IBS = 3  # not referenced by the classes below; reserved for AMD IBS samples — TODO confirm
| 18 | |||
| 19 | # | ||
| 20 | # Currently we don't have good way to tell the event type, but by | ||
| 21 | # the size of raw buffer, raw PEBS event with load latency data's | ||
| 22 | # size is 176 bytes, while the pure PEBS event's size is 144 bytes. | ||
| 23 | # | ||
def create_event(name, comm, dso, symbol, raw_buf):
    """Build the event object matching the raw buffer's size.

    A 144-byte raw buffer is a plain PEBS record, a 176-byte one is a
    PEBS record with load-latency data appended, and anything else is
    treated as a generic perf sample.
    """
    size = len(raw_buf)
    if size == 144:
        event_cls = PebsEvent
    elif size == 176:
        event_cls = PebsNHM
    else:
        event_cls = PerfEvent
    return event_cls(name, comm, dso, symbol, raw_buf)
| 33 | |||
class PerfEvent(object):
    """Base class for one perf event sample.

    Stores the fields common to every sample type and keeps a
    class-wide running count of created events.
    """
    event_num = 0  # total number of PerfEvent (and subclass) instances

    def __init__(self, name, comm, dso, symbol, raw_buf, ev_type=EVTYPE_GENERIC):
        self.name = name        # event name
        self.comm = comm        # task command name
        self.dso = dso          # DSO the sample hit (may be "Unknown_dso")
        self.symbol = symbol    # resolved symbol (may be "Unknown_symbol")
        self.raw_buf = raw_buf  # raw sample payload, decoded by subclasses
        self.ev_type = ev_type  # one of the EVTYPE_* constants
        PerfEvent.event_num += 1

    def show(self):
        """Print a one-line summary of this sample."""
        # Single-argument print(...) behaves identically as a Python 2
        # parenthesized print statement and a Python 3 function call,
        # unlike the old py2-only bare print statement.
        print("PMU event: name=%12s, symbol=%24s, comm=%8s, dso=%12s" %
              (self.name, self.symbol, self.comm, self.dso))
| 47 | |||
| 48 | # | ||
| 49 | # Basic Intel PEBS (Precise Event-based Sampling) event, whose raw buffer | ||
| 50 | # contains the context info when that event happened: the EFLAGS and | ||
| 51 | # linear IP info, as well as all the registers. | ||
| 52 | # | ||
class PebsEvent(PerfEvent):
    """Basic Intel PEBS (Precise Event-based Sampling) sample.

    The first 80 bytes of the raw buffer are ten native 64-bit words
    holding the context when the event fired: EFLAGS, the linear IP
    and the general-purpose registers.
    """
    pebs_num = 0  # count of PEBS samples seen so far

    def __init__(self, name, comm, dso, symbol, raw_buf, ev_type=EVTYPE_PEBS):
        regs = struct.unpack('QQQQQQQQQQ', raw_buf[0:80])
        (self.flags, self.ip, self.ax, self.bx, self.cx,
         self.dx, self.si, self.di, self.bp, self.sp) = regs
        PerfEvent.__init__(self, name, comm, dso, symbol, raw_buf, ev_type)
        PebsEvent.pebs_num += 1
| 72 | |||
| 73 | # | ||
| 74 | # Intel Nehalem and Westmere support PEBS plus Load Latency info which lie | ||
| 75 | # in the four 64 bit words write after the PEBS data: | ||
| 76 | # Status: records the IA32_PERF_GLOBAL_STATUS register value | ||
| 77 | # DLA: Data Linear Address (EIP) | ||
| 78 | # DSE: Data Source Encoding, where the latency happens, hit or miss | ||
| 79 | # in L1/L2/L3 or IO operations | ||
| 80 | # LAT: the actual latency in cycles | ||
| 81 | # | ||
class PebsNHM(PebsEvent):
    """PEBS sample with Nehalem/Westmere load-latency data.

    Bytes 144..176 of the raw buffer are four extra native 64-bit
    words appended after the basic PEBS record: the global status,
    the data linear address, the data source encoding and the load
    latency in cycles.
    """
    pebs_nhm_num = 0  # count of load-latency samples seen so far

    def __init__(self, name, comm, dso, symbol, raw_buf, ev_type=EVTYPE_PEBS_LL):
        extra = struct.unpack('QQQQ', raw_buf[144:176])
        self.status, self.dla, self.dse, self.lat = extra
        PebsEvent.__init__(self, name, comm, dso, symbol, raw_buf, ev_type)
        PebsNHM.pebs_nhm_num += 1
diff --git a/tools/perf/scripts/python/bin/event_analyzing_sample-record b/tools/perf/scripts/python/bin/event_analyzing_sample-record deleted file mode 100644 index 5ce652dabd0..00000000000 --- a/tools/perf/scripts/python/bin/event_analyzing_sample-record +++ /dev/null | |||
| @@ -1,8 +0,0 @@ | |||
#!/bin/bash

#
# event_analyzing_sample.py can cover all type of perf samples including
# the tracepoints, so no special record requirements, just record what
# you want to analyze.
#
# Quote "$@" so arguments containing whitespace are passed through intact.
perf record "$@"
diff --git a/tools/perf/scripts/python/bin/event_analyzing_sample-report b/tools/perf/scripts/python/bin/event_analyzing_sample-report deleted file mode 100644 index 0941fc94e15..00000000000 --- a/tools/perf/scripts/python/bin/event_analyzing_sample-report +++ /dev/null | |||
| @@ -1,3 +0,0 @@ | |||
#!/bin/bash
# description: analyze all perf samples
# Quote "$@" so user-supplied options with spaces are not word-split.
perf script "$@" -s "$PERF_EXEC_PATH"/scripts/python/event_analyzing_sample.py
diff --git a/tools/perf/scripts/python/bin/net_dropmonitor-record b/tools/perf/scripts/python/bin/net_dropmonitor-record deleted file mode 100755 index 423fb81dada..00000000000 --- a/tools/perf/scripts/python/bin/net_dropmonitor-record +++ /dev/null | |||
| @@ -1,2 +0,0 @@ | |||
#!/bin/bash
# Record kfree_skb tracepoint events; quote "$@" to preserve argument boundaries.
perf record -e skb:kfree_skb "$@"
diff --git a/tools/perf/scripts/python/bin/net_dropmonitor-report b/tools/perf/scripts/python/bin/net_dropmonitor-report deleted file mode 100755 index 8d698f5a06a..00000000000 --- a/tools/perf/scripts/python/bin/net_dropmonitor-report +++ /dev/null | |||
| @@ -1,4 +0,0 @@ | |||
#!/bin/bash
# description: display a table of dropped frames

# Quote "$@" so user-supplied options with spaces are not word-split.
perf script -s "$PERF_EXEC_PATH"/scripts/python/net_dropmonitor.py "$@"
diff --git a/tools/perf/scripts/python/event_analyzing_sample.py b/tools/perf/scripts/python/event_analyzing_sample.py deleted file mode 100644 index 163c39fa12d..00000000000 --- a/tools/perf/scripts/python/event_analyzing_sample.py +++ /dev/null | |||
| @@ -1,189 +0,0 @@ | |||
| 1 | # event_analyzing_sample.py: general event handler in python | ||
| 2 | # | ||
| 3 | # Current perf report is already very powerful with the annotation integrated, | ||
| 4 | # and this script is not trying to be as powerful as perf report, but | ||
| 5 | # providing end user/developer a flexible way to analyze the events other | ||
| 6 | # than trace points. | ||
| 7 | # | ||
| 8 | # The 2 database related functions in this script just show how to gather | ||
| 9 | # the basic information, and users can modify and write their own functions | ||
| 10 | # according to their specific requirement. | ||
| 11 | # | ||
| 12 | # The first function "show_general_events" just does a basic grouping for all | ||
| 13 | # generic events with the help of sqlite, and the 2nd one "show_pebs_ll" is | ||
| 14 | # for a x86 HW PMU event: PEBS with load latency data. | ||
| 15 | # | ||
| 16 | |||
| 17 | import os | ||
| 18 | import sys | ||
| 19 | import math | ||
| 20 | import struct | ||
| 21 | import sqlite3 | ||
| 22 | |||
| 23 | sys.path.append(os.environ['PERF_EXEC_PATH'] + \ | ||
| 24 | '/scripts/python/Perf-Trace-Util/lib/Perf/Trace') | ||
| 25 | |||
| 26 | from perf_trace_context import * | ||
| 27 | from EventClass import * | ||
| 28 | |||
#
# If the perf.data has a big number of samples, then the insert operation
# will be very time consuming (about 10+ minutes for 10000 samples) if the
# .db database is on disk. Move the .db file to RAM based FS to speedup
# the handling, which will cut the time down to several seconds.
#
con = sqlite3.connect("/dev/shm/perf.db")
# Autocommit mode: no implicit transaction wraps the inserts below.
con.isolation_level = None
| 37 | |||
def trace_begin():
    """Called by perf before the first sample: create the DB tables.

    Converted the Python-2-only print statement to a single-argument
    print() call that behaves the same on Python 2 and 3.
    """
    print("In trace_begin:\n")

    #
    # Will create several tables at the start, pebs_ll is for PEBS data with
    # load latency info, while gen_events is for general event.
    #
    con.execute("""
            create table if not exists gen_events (
                    name text,
                    symbol text,
                    comm text,
                    dso text
            );""")
    con.execute("""
            create table if not exists pebs_ll (
                    name text,
                    symbol text,
                    comm text,
                    dso text,
                    flags integer,
                    ip integer,
                    status integer,
                    dse integer,
                    dla integer,
                    lat integer
            );""")
| 65 | |||
| 66 | # | ||
| 67 | # Create and insert event object to a database so that user could | ||
| 68 | # do more analysis with simple database commands. | ||
| 69 | # | ||
def process_event(param_dict):
    """Build an event object from one perf sample and store it in the DB.

    param_dict is the sample dictionary perf hands to python scripts;
    "dso" and "symbol" are optional since they are not always resolved.
    """
    event_attr = param_dict["attr"]
    sample = param_dict["sample"]
    raw_buf = param_dict["raw_buf"]
    comm = param_dict["comm"]
    name = param_dict["ev_name"]

    # Symbol and dso info are not always resolved.
    # dict.has_key() was removed in Python 3; dict.get() with a default
    # is the equivalent, version-agnostic form.
    dso = param_dict.get("dso", "Unknown_dso")
    symbol = param_dict.get("symbol", "Unknown_symbol")

    # Create the event object and insert it to the right table in database
    event = create_event(name, comm, dso, symbol, raw_buf)
    insert_db(event)
| 91 | |||
def insert_db(event):
    """Store one event in the table matching its type.

    Generic samples go to gen_events; PEBS load-latency samples go to
    pebs_ll.  Other event types are not persisted.
    """
    if event.ev_type == EVTYPE_GENERIC:
        row = (event.name, event.symbol, event.comm, event.dso)
        con.execute("insert into gen_events values(?, ?, ?, ?)", row)
    elif event.ev_type == EVTYPE_PEBS_LL:
        # Clear the top bit of the address fields, presumably so the
        # values fit SQLite's signed 64-bit integers — TODO confirm.
        event.ip &= 0x7fffffffffffffff
        event.dla &= 0x7fffffffffffffff
        row = (event.name, event.symbol, event.comm, event.dso,
               event.flags, event.ip, event.status, event.dse,
               event.dla, event.lat)
        con.execute("insert into pebs_ll values (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)",
                    row)
| 102 | |||
def trace_end():
    """Called by perf after the last sample: print summaries, close DB.

    Uses version-agnostic print() instead of the py2-only statement.
    """
    print("In trace_end:\n")
    # We show the basic info for the 2 type of event classes
    show_general_events()
    show_pebs_ll()
    con.close()
| 109 | |||
| 110 | # | ||
| 111 | # As the event number may be very big, so we can't use linear way | ||
| 112 | # to show the histogram in real number, but use a log2 algorithm. | ||
| 113 | # | ||
| 114 | |||
def num2sym(num):
    """Return the log2-scaled histogram bar for a positive count.

    Each number gets at least one '#'.  int.bit_length() equals
    floor(log2(num)) + 1 for every positive integer, so this matches
    the old int(math.log(num, 2) + 1) while avoiding float rounding
    errors on large counts.
    """
    return '#' * num.bit_length()
| 119 | |||
def show_general_events():
    """Print the record count and per-comm/symbol/dso histograms
    for the gen_events table.

    Converted the Python-2-only print statements to single-argument
    print() calls that behave the same on Python 2 and 3.
    """

    # Check the total record number in the table
    count = con.execute("select count(*) from gen_events")
    for t in count:
        print("There is %d records in gen_events table" % t[0])
        if t[0] == 0:
            return

    print("Statistics about the general events grouped by thread/symbol/dso: \n")

    # Group by thread
    commq = con.execute("select comm, count(comm) from gen_events group by comm order by -count(comm)")
    print("\n%16s %8s %16s\n%s" % ("comm", "number", "histogram", "="*42))
    for row in commq:
        print("%16s %8d %s" % (row[0], row[1], num2sym(row[1])))

    # Group by symbol
    print("\n%32s %8s %16s\n%s" % ("symbol", "number", "histogram", "="*58))
    symbolq = con.execute("select symbol, count(symbol) from gen_events group by symbol order by -count(symbol)")
    for row in symbolq:
        print("%32s %8d %s" % (row[0], row[1], num2sym(row[1])))

    # Group by dso
    print("\n%40s %8s %16s\n%s" % ("dso", "number", "histogram", "="*74))
    dsoq = con.execute("select dso, count(dso) from gen_events group by dso order by -count(dso)")
    for row in dsoq:
        print("%40s %8d %s" % (row[0], row[1], num2sym(row[1])))
| 148 | |||
| 149 | # | ||
| 150 | # This function just shows the basic info, and we could do more with the | ||
| 151 | # data in the tables, like checking the function parameters when some | ||
| 152 | # big latency events happen. | ||
| 153 | # | ||
def show_pebs_ll():
    """Print the record count and per-comm/symbol/dse/latency
    histograms for the pebs_ll table.

    Converted the Python-2-only print statements to single-argument
    print() calls that behave the same on Python 2 and 3.
    """

    count = con.execute("select count(*) from pebs_ll")
    for t in count:
        print("There is %d records in pebs_ll table" % t[0])
        if t[0] == 0:
            return

    print("Statistics about the PEBS Load Latency events grouped by thread/symbol/dse/latency: \n")

    # Group by thread
    commq = con.execute("select comm, count(comm) from pebs_ll group by comm order by -count(comm)")
    print("\n%16s %8s %16s\n%s" % ("comm", "number", "histogram", "="*42))
    for row in commq:
        print("%16s %8d %s" % (row[0], row[1], num2sym(row[1])))

    # Group by symbol
    print("\n%32s %8s %16s\n%s" % ("symbol", "number", "histogram", "="*58))
    symbolq = con.execute("select symbol, count(symbol) from pebs_ll group by symbol order by -count(symbol)")
    for row in symbolq:
        print("%32s %8d %s" % (row[0], row[1], num2sym(row[1])))

    # Group by dse
    dseq = con.execute("select dse, count(dse) from pebs_ll group by dse order by -count(dse)")
    print("\n%32s %8s %16s\n%s" % ("dse", "number", "histogram", "="*58))
    for row in dseq:
        print("%32s %8d %s" % (row[0], row[1], num2sym(row[1])))

    # Group by latency
    latq = con.execute("select lat, count(lat) from pebs_ll group by lat order by lat")
    print("\n%32s %8s %16s\n%s" % ("latency", "number", "histogram", "="*58))
    for row in latq:
        print("%32s %8d %s" % (row[0], row[1], num2sym(row[1])))
| 187 | |||
def trace_unhandled(event_name, context, event_fields_dict):
    """Fallback handler: dump an unhandled event's fields as sorted key=value pairs."""
    # Single-argument print() works identically on Python 2 and 3.
    print(' '.join(['%s=%s' % (k, str(v)) for k, v in sorted(event_fields_dict.items())]))
diff --git a/tools/perf/scripts/python/net_dropmonitor.py b/tools/perf/scripts/python/net_dropmonitor.py deleted file mode 100755 index a4ffc950002..00000000000 --- a/tools/perf/scripts/python/net_dropmonitor.py +++ /dev/null | |||
| @@ -1,72 +0,0 @@ | |||
| 1 | # Monitor the system for dropped packets and produce a report of drop locations and counts | ||
| 2 | |||
| 3 | import os | ||
| 4 | import sys | ||
| 5 | |||
| 6 | sys.path.append(os.environ['PERF_EXEC_PATH'] + \ | ||
| 7 | '/scripts/python/Perf-Trace-Util/lib/Perf/Trace') | ||
| 8 | |||
| 9 | from perf_trace_context import * | ||
| 10 | from Core import * | ||
| 11 | from Util import * | ||
| 12 | |||
# Maps a drop location (as a string) to the number of drops seen there.
drop_log = {}
# List of {'loc': address, 'name': symbol} entries read from /proc/kallsyms.
kallsyms = []
| 15 | |||
def get_kallsyms_table():
    """Populate the global kallsyms list from /proc/kallsyms.

    Best effort: if the file cannot be opened the table is left empty.
    A same-line progress counter is printed while parsing, since the
    file can be large.
    """
    global kallsyms
    try:
        f = open("/proc/kallsyms", "r")
    except (IOError, OSError):
        # Narrowed from a bare except: only swallow I/O failures,
        # keeping the original best-effort behavior.
        return

    with f:  # the original leaked the file handle
        linecount = 0
        for line in f:
            linecount += 1
        f.seek(0)

        j = 0
        for line in f:
            fields = line.split()  # split once instead of twice per line
            loc = int(fields[0], 16)
            name = fields[2]
            j += 1
            if (j % 100) == 0:
                # Same-line progress (replaces the py2-only trailing-comma print).
                sys.stdout.write("\r%d/%d" % (j, linecount))
            kallsyms.append({'loc': loc, 'name': name})

    print("\r%d/%d" % (j, linecount))
    # Sort by address explicitly: comparing the dicts themselves is a
    # TypeError on Python 3 and only accidentally ordered on Python 2.
    kallsyms.sort(key=lambda sym: sym['loc'])
    return
| 40 | |||
def get_sym(sloc):
    """Map an address string to (symbol_name, offset).

    Scans the kallsyms table for the first entry whose address is at
    or above the target; returns (None, 0) when nothing matches.
    """
    target = int(sloc)
    for entry in kallsyms:
        if entry['loc'] >= target:
            return (entry['name'], entry['loc'] - target)
    return (None, 0)
| 47 | |||
def print_drop_table():
    """Print one row per drop location: symbol (or raw address), offset, count."""
    print("%25s %25s %25s" % ("LOCATION", "OFFSET", "COUNT"))
    # Iterate items() directly instead of keys() plus a second lookup.
    for location, count in drop_log.items():
        (sym, off) = get_sym(location)
        if sym is None:  # "is None", not "== None"
            # Unresolved address: fall back to printing the raw location.
            sym = location
        print("%25s %25s %25s" % (sym, off, count))
| 55 | |||
| 56 | |||
def trace_begin():
    """Announce that capture has started; results are printed on exit."""
    # Single-argument print() works identically on Python 2 and 3.
    print("Starting trace (Ctrl-C to dump results)")
| 59 | |||
def trace_end():
    """On exit: load the kallsyms table, then print the drop report."""
    print("Gathering kallsyms data")
    get_kallsyms_table()
    print_drop_table()
| 64 | |||
| 65 | # called from perf, when it finds a corresponding event | ||
def skb__kfree_skb(name, context, cpu, sec, nsec, pid, comm,
                   skbaddr, protocol, location):
    """Tracepoint handler for skb:kfree_skb: count drops per location."""
    slocation = str(location)
    # dict.get() replaces the old bare try/except, which would have
    # swallowed any exception, not just the KeyError it targeted.
    drop_log[slocation] = drop_log.get(slocation, 0) + 1
