Diffstat (limited to 'tools/perf/scripts/python/sched-migration.py')
-rw-r--r-- | tools/perf/scripts/python/sched-migration.py | 461
1 file changed, 461 insertions(+), 0 deletions(-)
diff --git a/tools/perf/scripts/python/sched-migration.py b/tools/perf/scripts/python/sched-migration.py
new file mode 100644
index 000000000000..b934383c3364
--- /dev/null
+++ b/tools/perf/scripts/python/sched-migration.py
@@ -0,0 +1,461 @@
#!/usr/bin/python
#
# Cpu task migration overview toy
#
# Copyright (C) 2010 Frederic Weisbecker <fweisbec@gmail.com>
#
# perf trace event handlers have been generated by perf trace -g python
#
# This software is distributed under the terms of the GNU General
# Public License ("GPL") version 2 as published by the Free Software
# Foundation.


import os
import sys

from collections import defaultdict
from UserList import UserList

sys.path.append(os.environ['PERF_EXEC_PATH'] + \
    '/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
sys.path.append('scripts/python/Perf-Trace-Util/lib/Perf/Trace')

from perf_trace_context import *
from Core import *
from SchedGui import *


threads = { 0 : "idle"}

def thread_name(pid):
    return "%s:%d" % (threads[pid], pid)

class RunqueueEventUnknown:
    @staticmethod
    def color():
        return None

    def __repr__(self):
        return "unknown"

class RunqueueEventSleep:
    @staticmethod
    def color():
        return (0, 0, 0xff)

    def __init__(self, sleeper):
        self.sleeper = sleeper

    def __repr__(self):
        return "%s gone to sleep" % thread_name(self.sleeper)

class RunqueueEventWakeup:
    @staticmethod
    def color():
        return (0xff, 0xff, 0)

    def __init__(self, wakee):
        self.wakee = wakee

    def __repr__(self):
        return "%s woke up" % thread_name(self.wakee)

class RunqueueEventFork:
    @staticmethod
    def color():
        return (0, 0xff, 0)

    def __init__(self, child):
        self.child = child

    def __repr__(self):
        return "new forked task %s" % thread_name(self.child)

class RunqueueMigrateIn:
    @staticmethod
    def color():
        return (0, 0xf0, 0xff)

    def __init__(self, new):
        self.new = new

    def __repr__(self):
        return "task migrated in %s" % thread_name(self.new)

class RunqueueMigrateOut:
    @staticmethod
    def color():
        return (0xff, 0, 0xff)

    def __init__(self, old):
        self.old = old

    def __repr__(self):
        return "task migrated out %s" % thread_name(self.old)

class RunqueueSnapshot:
    def __init__(self, tasks = [0], event = RunqueueEventUnknown()):
        self.tasks = tuple(tasks)
        self.event = event

    def sched_switch(self, prev, prev_state, next):
        event = RunqueueEventUnknown()

        if taskState(prev_state) == "R" and next in self.tasks \
            and prev in self.tasks:
            return self

        if taskState(prev_state) != "R":
            event = RunqueueEventSleep(prev)

        next_tasks = list(self.tasks[:])
        if prev in self.tasks:
            if taskState(prev_state) != "R":
                next_tasks.remove(prev)
        elif taskState(prev_state) == "R":
            next_tasks.append(prev)

        if next not in next_tasks:
            next_tasks.append(next)

        return RunqueueSnapshot(next_tasks, event)

    def migrate_out(self, old):
        if old not in self.tasks:
            return self
        next_tasks = [task for task in self.tasks if task != old]

        return RunqueueSnapshot(next_tasks, RunqueueMigrateOut(old))

    def __migrate_in(self, new, event):
        if new in self.tasks:
            self.event = event
            return self
        next_tasks = self.tasks[:] + tuple([new])

        return RunqueueSnapshot(next_tasks, event)

    def migrate_in(self, new):
        return self.__migrate_in(new, RunqueueMigrateIn(new))

    def wake_up(self, new):
        return self.__migrate_in(new, RunqueueEventWakeup(new))

    def wake_up_new(self, new):
        return self.__migrate_in(new, RunqueueEventFork(new))

    def load(self):
        """ Provide the number of tasks on the runqueue.
            Don't count idle"""
        return len(self.tasks) - 1

    def __repr__(self):
        ret = self.tasks.__repr__()
        ret += self.origin_tostring()

        return ret

class TimeSlice:
    def __init__(self, start, prev):
        self.start = start
        self.prev = prev
        self.end = start
        # cpus that triggered the event
        self.event_cpus = []
        if prev is not None:
            self.total_load = prev.total_load
            self.rqs = prev.rqs.copy()
        else:
            self.rqs = defaultdict(RunqueueSnapshot)
            self.total_load = 0

    def __update_total_load(self, old_rq, new_rq):
        diff = new_rq.load() - old_rq.load()
        self.total_load += diff

    def sched_switch(self, ts_list, prev, prev_state, next, cpu):
        old_rq = self.prev.rqs[cpu]
        new_rq = old_rq.sched_switch(prev, prev_state, next)

        if old_rq is new_rq:
            return

        self.rqs[cpu] = new_rq
        self.__update_total_load(old_rq, new_rq)
        ts_list.append(self)
        self.event_cpus = [cpu]

    def migrate(self, ts_list, new, old_cpu, new_cpu):
        if old_cpu == new_cpu:
            return
        old_rq = self.prev.rqs[old_cpu]
        out_rq = old_rq.migrate_out(new)
        self.rqs[old_cpu] = out_rq
        self.__update_total_load(old_rq, out_rq)

        new_rq = self.prev.rqs[new_cpu]
        in_rq = new_rq.migrate_in(new)
        self.rqs[new_cpu] = in_rq
        self.__update_total_load(new_rq, in_rq)

        ts_list.append(self)

        if old_rq is not out_rq:
            self.event_cpus.append(old_cpu)
        self.event_cpus.append(new_cpu)

    def wake_up(self, ts_list, pid, cpu, fork):
        old_rq = self.prev.rqs[cpu]
        if fork:
            new_rq = old_rq.wake_up_new(pid)
        else:
            new_rq = old_rq.wake_up(pid)

        if new_rq is old_rq:
            return
        self.rqs[cpu] = new_rq
        self.__update_total_load(old_rq, new_rq)
        ts_list.append(self)
        self.event_cpus = [cpu]

    def next(self, t):
        self.end = t
        return TimeSlice(t, self)

class TimeSliceList(UserList):
    def __init__(self, arg = []):
        self.data = arg

    def get_time_slice(self, ts):
        if len(self.data) == 0:
            slice = TimeSlice(ts, TimeSlice(-1, None))
        else:
            slice = self.data[-1].next(ts)
        return slice

    def find_time_slice(self, ts):
        start = 0
        end = len(self.data)
        found = -1
        searching = True
        while searching:
            if start == end or start == end - 1:
                searching = False

            i = (end + start) / 2
            if self.data[i].start <= ts and self.data[i].end >= ts:
                found = i
                end = i
                continue

            if self.data[i].end < ts:
                start = i

            elif self.data[i].start > ts:
                end = i

        return found

    def set_root_win(self, win):
        self.root_win = win

    def mouse_down(self, cpu, t):
        idx = self.find_time_slice(t)
        if idx == -1:
            return

        ts = self[idx]
        rq = ts.rqs[cpu]
        raw = "CPU: %d\n" % cpu
        raw += "Last event : %s\n" % rq.event.__repr__()
        raw += "Timestamp : %d.%06d\n" % (ts.start / (10 ** 9), (ts.start % (10 ** 9)) / 1000)
        raw += "Duration : %6d us\n" % ((ts.end - ts.start) / (10 ** 6))
        raw += "Load = %d\n" % rq.load()
        for t in rq.tasks:
            raw += "%s \n" % thread_name(t)

        self.root_win.update_summary(raw)

    def update_rectangle_cpu(self, slice, cpu):
        rq = slice.rqs[cpu]

        if slice.total_load != 0:
            load_rate = rq.load() / float(slice.total_load)
        else:
            load_rate = 0

        red_power = int(0xff - (0xff * load_rate))
        color = (0xff, red_power, red_power)

        top_color = None

        if cpu in slice.event_cpus:
            top_color = rq.event.color()

        self.root_win.paint_rectangle_zone(cpu, color, top_color, slice.start, slice.end)

    def fill_zone(self, start, end):
        i = self.find_time_slice(start)
        if i == -1:
            return

        for i in xrange(i, len(self.data)):
            timeslice = self.data[i]
            if timeslice.start > end:
                return

            for cpu in timeslice.rqs:
                self.update_rectangle_cpu(timeslice, cpu)

    def interval(self):
        if len(self.data) == 0:
            return (0, 0)

        return (self.data[0].start, self.data[-1].end)

    def nr_rectangles(self):
        last_ts = self.data[-1]
        max_cpu = 0
        for cpu in last_ts.rqs:
            if cpu > max_cpu:
                max_cpu = cpu
        return max_cpu


class SchedEventProxy:
    def __init__(self):
        self.current_tsk = defaultdict(lambda : -1)
        self.timeslices = TimeSliceList()

    def sched_switch(self, headers, prev_comm, prev_pid, prev_prio, prev_state,
                     next_comm, next_pid, next_prio):
        """ Ensure the task we sched out this cpu is really the one
            we logged. Otherwise we may have missed traces """

        on_cpu_task = self.current_tsk[headers.cpu]

        if on_cpu_task != -1 and on_cpu_task != prev_pid:
            print "Sched switch event rejected ts: %s cpu: %d prev: %s(%d) next: %s(%d)" % \
                (headers.ts_format(), headers.cpu, prev_comm, prev_pid, next_comm, next_pid)

        threads[prev_pid] = prev_comm
        threads[next_pid] = next_comm
        self.current_tsk[headers.cpu] = next_pid

        ts = self.timeslices.get_time_slice(headers.ts())
        ts.sched_switch(self.timeslices, prev_pid, prev_state, next_pid, headers.cpu)

    def migrate(self, headers, pid, prio, orig_cpu, dest_cpu):
        ts = self.timeslices.get_time_slice(headers.ts())
        ts.migrate(self.timeslices, pid, orig_cpu, dest_cpu)

    def wake_up(self, headers, comm, pid, success, target_cpu, fork):
        if success == 0:
            return
        ts = self.timeslices.get_time_slice(headers.ts())
        ts.wake_up(self.timeslices, pid, target_cpu, fork)


def trace_begin():
    global parser
    parser = SchedEventProxy()

def trace_end():
    app = wx.App(False)
    timeslices = parser.timeslices
    frame = RootFrame(timeslices, "Migration")
    app.MainLoop()

def sched__sched_stat_runtime(event_name, context, common_cpu,
        common_secs, common_nsecs, common_pid, common_comm,
        comm, pid, runtime, vruntime):
    pass

def sched__sched_stat_iowait(event_name, context, common_cpu,
        common_secs, common_nsecs, common_pid, common_comm,
        comm, pid, delay):
    pass

def sched__sched_stat_sleep(event_name, context, common_cpu,
        common_secs, common_nsecs, common_pid, common_comm,
        comm, pid, delay):
    pass

def sched__sched_stat_wait(event_name, context, common_cpu,
        common_secs, common_nsecs, common_pid, common_comm,
        comm, pid, delay):
    pass

def sched__sched_process_fork(event_name, context, common_cpu,
        common_secs, common_nsecs, common_pid, common_comm,
        parent_comm, parent_pid, child_comm, child_pid):
    pass

def sched__sched_process_wait(event_name, context, common_cpu,
        common_secs, common_nsecs, common_pid, common_comm,
        comm, pid, prio):
    pass

def sched__sched_process_exit(event_name, context, common_cpu,
        common_secs, common_nsecs, common_pid, common_comm,
        comm, pid, prio):
    pass

def sched__sched_process_free(event_name, context, common_cpu,
        common_secs, common_nsecs, common_pid, common_comm,
        comm, pid, prio):
    pass

def sched__sched_migrate_task(event_name, context, common_cpu,
        common_secs, common_nsecs, common_pid, common_comm,
        comm, pid, prio, orig_cpu,
        dest_cpu):
    headers = EventHeaders(common_cpu, common_secs, common_nsecs,
                           common_pid, common_comm)
    parser.migrate(headers, pid, prio, orig_cpu, dest_cpu)

def sched__sched_switch(event_name, context, common_cpu,
        common_secs, common_nsecs, common_pid, common_comm,
        prev_comm, prev_pid, prev_prio, prev_state,
        next_comm, next_pid, next_prio):

    headers = EventHeaders(common_cpu, common_secs, common_nsecs,
                           common_pid, common_comm)
    parser.sched_switch(headers, prev_comm, prev_pid, prev_prio, prev_state,
                        next_comm, next_pid, next_prio)

def sched__sched_wakeup_new(event_name, context, common_cpu,
        common_secs, common_nsecs, common_pid, common_comm,
        comm, pid, prio, success,
        target_cpu):
    headers = EventHeaders(common_cpu, common_secs, common_nsecs,
                           common_pid, common_comm)
    parser.wake_up(headers, comm, pid, success, target_cpu, 1)

def sched__sched_wakeup(event_name, context, common_cpu,
        common_secs, common_nsecs, common_pid, common_comm,
        comm, pid, prio, success,
        target_cpu):
    headers = EventHeaders(common_cpu, common_secs, common_nsecs,
                           common_pid, common_comm)
    parser.wake_up(headers, comm, pid, success, target_cpu, 0)

def sched__sched_wait_task(event_name, context, common_cpu,
        common_secs, common_nsecs, common_pid, common_comm,
        comm, pid, prio):
    pass

def sched__sched_kthread_stop_ret(event_name, context, common_cpu,
        common_secs, common_nsecs, common_pid, common_comm,
        ret):
    pass

def sched__sched_kthread_stop(event_name, context, common_cpu,
        common_secs, common_nsecs, common_pid, common_comm,
        comm, pid):
    pass

def trace_unhandled(event_name, context, common_cpu, common_secs, common_nsecs,
        common_pid, common_comm):
    pass