author	Chris Wilson <chris@chris-wilson.co.uk>	2009-08-25 06:15:50 -0400
committer	Chris Wilson <chris@chris-wilson.co.uk>	2009-09-22 20:05:21 -0400
commit	1c5d22f76dc721f3acb7a3dadc657a221e487fb7 (patch)
tree	99a69f1be4f10d1e38af2c5ece4b5905f7a5701a /drivers/gpu/drm/i915/i915_trace.h
parent	74dff282237ea8c0a5df1afd8526eac4b6cee063 (diff)
drm/i915: Add tracepoints
By adding tracepoint equivalents for WATCH_BUF/EXEC we are able to monitor the lifetimes of objects, requests and significant events. These events can then be probed using tracing frameworks such as systemtap and, in particular, perf.

For example, to record the stack trace for every GPU stall during a run, use

$ perf record -e i915:i915_gem_request_wait_begin -c 1 -g

and

$ perf report

to view the results.

[Updated to fix compilation issues.]

Cc: Arjan van de Ven <arjan@linux.intel.com>
Cc: Ben Gamari <bgamari@gmail.com>
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Diffstat (limited to 'drivers/gpu/drm/i915/i915_trace.h')
-rw-r--r--	drivers/gpu/drm/i915/i915_trace.h	315
1 file changed, 315 insertions, 0 deletions
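
The header introduced below only declares the events. Each TRACE_EVENT() expands into a trace_<name>() stub that the driver calls at the matching point in its code, and that compiles away to (almost) nothing when tracing is disabled. As a rough C sketch of what a call site looks like (the wrapper function here is purely illustrative; the real call sites are added to the i915 .c files elsewhere in this series, outside the diffstat above):

#include "i915_trace.h"

/* Illustrative only: fire the i915:i915_gem_object_create event for a
 * freshly allocated GEM object. */
static void i915_example_note_new_object(struct drm_gem_object *obj)
{
	trace_i915_gem_object_create(obj);
}
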
diff --git a/drivers/gpu/drm/i915/i915_trace.h b/drivers/gpu/drm/i915/i915_trace.h
new file mode 100644
index 000000000000..5567a40816f3
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_trace.h
@@ -0,0 +1,315 @@
+#if !defined(_I915_TRACE_H_) || defined(TRACE_HEADER_MULTI_READ)
+#define _I915_TRACE_H_
+
+#include <linux/stringify.h>
+#include <linux/types.h>
+#include <linux/tracepoint.h>
+
+#include <drm/drmP.h>
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM i915
+#define TRACE_SYSTEM_STRING __stringify(TRACE_SYSTEM)
+#define TRACE_INCLUDE_FILE i915_trace
+
+/* object tracking */
+
+TRACE_EVENT(i915_gem_object_create,
+
+	TP_PROTO(struct drm_gem_object *obj),
+
+	TP_ARGS(obj),
+
+	TP_STRUCT__entry(
+		__field(struct drm_gem_object *, obj)
+		__field(u32, size)
+	),
+
+	TP_fast_assign(
+		__entry->obj = obj;
+		__entry->size = obj->size;
+	),
+
+	TP_printk("obj=%p, size=%u", __entry->obj, __entry->size)
+);
+
+TRACE_EVENT(i915_gem_object_bind,
+
+	TP_PROTO(struct drm_gem_object *obj, u32 gtt_offset),
+
+	TP_ARGS(obj, gtt_offset),
+
+	TP_STRUCT__entry(
+		__field(struct drm_gem_object *, obj)
+		__field(u32, gtt_offset)
+	),
+
+	TP_fast_assign(
+		__entry->obj = obj;
+		__entry->gtt_offset = gtt_offset;
+	),
+
+	TP_printk("obj=%p, gtt_offset=%08x",
+		  __entry->obj, __entry->gtt_offset)
+);
+
+TRACE_EVENT(i915_gem_object_clflush,
+
+	TP_PROTO(struct drm_gem_object *obj),
+
+	TP_ARGS(obj),
+
+	TP_STRUCT__entry(
+		__field(struct drm_gem_object *, obj)
+	),
+
+	TP_fast_assign(
+		__entry->obj = obj;
+	),
+
+	TP_printk("obj=%p", __entry->obj)
+);
+
+TRACE_EVENT(i915_gem_object_change_domain,
+
+	TP_PROTO(struct drm_gem_object *obj, uint32_t old_read_domains, uint32_t old_write_domain),
+
+	TP_ARGS(obj, old_read_domains, old_write_domain),
+
+	TP_STRUCT__entry(
+		__field(struct drm_gem_object *, obj)
+		__field(u32, read_domains)
+		__field(u32, write_domain)
+	),
+
+	TP_fast_assign(
+		__entry->obj = obj;
+		__entry->read_domains = obj->read_domains | (old_read_domains << 16);
+		__entry->write_domain = obj->write_domain | (old_write_domain << 16);
+	),
+
+	TP_printk("obj=%p, read=%04x, write=%04x",
+		  __entry->obj,
+		  __entry->read_domains, __entry->write_domain)
+);
+
+TRACE_EVENT(i915_gem_object_get_fence,
+
+	TP_PROTO(struct drm_gem_object *obj, int fence, int tiling_mode),
+
+	TP_ARGS(obj, fence, tiling_mode),
+
+	TP_STRUCT__entry(
+		__field(struct drm_gem_object *, obj)
+		__field(int, fence)
+		__field(int, tiling_mode)
+	),
+
+	TP_fast_assign(
+		__entry->obj = obj;
+		__entry->fence = fence;
+		__entry->tiling_mode = tiling_mode;
+	),
+
+	TP_printk("obj=%p, fence=%d, tiling=%d",
+		  __entry->obj, __entry->fence, __entry->tiling_mode)
+);
+
+TRACE_EVENT(i915_gem_object_unbind,
+
+	TP_PROTO(struct drm_gem_object *obj),
+
+	TP_ARGS(obj),
+
+	TP_STRUCT__entry(
+		__field(struct drm_gem_object *, obj)
+	),
+
+	TP_fast_assign(
+		__entry->obj = obj;
+	),
+
+	TP_printk("obj=%p", __entry->obj)
+);
+
+TRACE_EVENT(i915_gem_object_destroy,
+
+	TP_PROTO(struct drm_gem_object *obj),
+
+	TP_ARGS(obj),
+
+	TP_STRUCT__entry(
+		__field(struct drm_gem_object *, obj)
+	),
+
+	TP_fast_assign(
+		__entry->obj = obj;
+	),
+
+	TP_printk("obj=%p", __entry->obj)
+);
+
+/* batch tracing */
+
+TRACE_EVENT(i915_gem_request_submit,
+
+	TP_PROTO(struct drm_device *dev, u32 seqno),
+
+	TP_ARGS(dev, seqno),
+
+	TP_STRUCT__entry(
+		__field(struct drm_device *, dev)
+		__field(u32, seqno)
+	),
+
+	TP_fast_assign(
+		__entry->dev = dev;
+		__entry->seqno = seqno;
+	),
+
+	TP_printk("dev=%p, seqno=%u", __entry->dev, __entry->seqno)
+);
+
+TRACE_EVENT(i915_gem_request_flush,
+
+	TP_PROTO(struct drm_device *dev, u32 seqno,
+		 u32 flush_domains, u32 invalidate_domains),
+
+	TP_ARGS(dev, seqno, flush_domains, invalidate_domains),
+
+	TP_STRUCT__entry(
+		__field(struct drm_device *, dev)
+		__field(u32, seqno)
+		__field(u32, flush_domains)
+		__field(u32, invalidate_domains)
+	),
+
+	TP_fast_assign(
+		__entry->dev = dev;
+		__entry->seqno = seqno;
+		__entry->flush_domains = flush_domains;
+		__entry->invalidate_domains = invalidate_domains;
+	),
+
+	TP_printk("dev=%p, seqno=%u, flush=%04x, invalidate=%04x",
+		  __entry->dev, __entry->seqno,
+		  __entry->flush_domains, __entry->invalidate_domains)
+);
+
+
+TRACE_EVENT(i915_gem_request_complete,
+
+	TP_PROTO(struct drm_device *dev, u32 seqno),
+
+	TP_ARGS(dev, seqno),
+
+	TP_STRUCT__entry(
+		__field(struct drm_device *, dev)
+		__field(u32, seqno)
+	),
+
+	TP_fast_assign(
+		__entry->dev = dev;
+		__entry->seqno = seqno;
+	),
+
+	TP_printk("dev=%p, seqno=%u", __entry->dev, __entry->seqno)
+);
+
+TRACE_EVENT(i915_gem_request_retire,
+
+	TP_PROTO(struct drm_device *dev, u32 seqno),
+
+	TP_ARGS(dev, seqno),
+
+	TP_STRUCT__entry(
+		__field(struct drm_device *, dev)
+		__field(u32, seqno)
+	),
+
+	TP_fast_assign(
+		__entry->dev = dev;
+		__entry->seqno = seqno;
+	),
+
+	TP_printk("dev=%p, seqno=%u", __entry->dev, __entry->seqno)
+);
+
+TRACE_EVENT(i915_gem_request_wait_begin,
+
+	TP_PROTO(struct drm_device *dev, u32 seqno),
+
+	TP_ARGS(dev, seqno),
+
+	TP_STRUCT__entry(
+		__field(struct drm_device *, dev)
+		__field(u32, seqno)
+	),
+
+	TP_fast_assign(
+		__entry->dev = dev;
+		__entry->seqno = seqno;
+	),
+
+	TP_printk("dev=%p, seqno=%u", __entry->dev, __entry->seqno)
+);
+
+TRACE_EVENT(i915_gem_request_wait_end,
+
+	TP_PROTO(struct drm_device *dev, u32 seqno),
+
+	TP_ARGS(dev, seqno),
+
+	TP_STRUCT__entry(
+		__field(struct drm_device *, dev)
+		__field(u32, seqno)
+	),
+
+	TP_fast_assign(
+		__entry->dev = dev;
+		__entry->seqno = seqno;
+	),
+
+	TP_printk("dev=%p, seqno=%u", __entry->dev, __entry->seqno)
+);
+
+TRACE_EVENT(i915_ring_wait_begin,
+
+	TP_PROTO(struct drm_device *dev),
+
+	TP_ARGS(dev),
+
+	TP_STRUCT__entry(
+		__field(struct drm_device *, dev)
+	),
+
+	TP_fast_assign(
+		__entry->dev = dev;
+	),
+
+	TP_printk("dev=%p", __entry->dev)
+);
+
+TRACE_EVENT(i915_ring_wait_end,
+
+	TP_PROTO(struct drm_device *dev),
+
+	TP_ARGS(dev),
+
+	TP_STRUCT__entry(
+		__field(struct drm_device *, dev)
+	),
+
+	TP_fast_assign(
+		__entry->dev = dev;
+	),
+
+	TP_printk("dev=%p", __entry->dev)
+);
+
+#endif /* _I915_TRACE_H_ */
+
+/* This part must be outside protection */
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH ../../drivers/gpu/drm/i915
+#include <trace/define_trace.h>
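
One piece of wiring lives outside this header: for the trace_*() calls to be emitted, exactly one compilation unit in the driver must instantiate the tracepoints by defining CREATE_TRACE_POINTS before including the header, which is what the TRACE_INCLUDE_PATH/TRACE_INCLUDE_FILE definitions above exist to support. A minimal sketch, assuming it is done in one of the existing i915 .c files (the actual hook-up is part of the wider patch and not visible in this diffstat):

/* In exactly one i915 source file, and nowhere else: */
#define CREATE_TRACE_POINTS
#include "i915_trace.h"

Once built, the events are exported under the i915 trace system, e.g. i915:i915_gem_request_wait_begin as used in the perf command from the commit message.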