Diffstat (limited to 'drivers/gpu/drm/i915/i915_trace.h')
-rw-r--r--	drivers/gpu/drm/i915/i915_trace.h	328
1 files changed, 220 insertions(+), 108 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_trace.h b/drivers/gpu/drm/i915/i915_trace.h
index fea97a21cc14..d623fefbfaca 100644
--- a/drivers/gpu/drm/i915/i915_trace.h
+++ b/drivers/gpu/drm/i915/i915_trace.h
@@ -6,6 +6,8 @@
 #include <linux/tracepoint.h>
 
 #include <drm/drmP.h>
+#include "i915_drv.h"
+#include "intel_ringbuffer.h"
 
 #undef TRACE_SYSTEM
 #define TRACE_SYSTEM i915
@@ -15,97 +17,160 @@
 /* object tracking */
 
 TRACE_EVENT(i915_gem_object_create,
-
-	TP_PROTO(struct drm_gem_object *obj),
-
+	TP_PROTO(struct drm_i915_gem_object *obj),
 	TP_ARGS(obj),
 
 	TP_STRUCT__entry(
-		__field(struct drm_gem_object *, obj)
+		__field(struct drm_i915_gem_object *, obj)
 		__field(u32, size)
 	),
 
 	TP_fast_assign(
 		__entry->obj = obj;
-		__entry->size = obj->size;
+		__entry->size = obj->base.size;
 	),
 
 	TP_printk("obj=%p, size=%u", __entry->obj, __entry->size)
 );
 
 TRACE_EVENT(i915_gem_object_bind,
+	TP_PROTO(struct drm_i915_gem_object *obj, bool mappable),
+	TP_ARGS(obj, mappable),
 
-	TP_PROTO(struct drm_gem_object *obj, u32 gtt_offset),
+	TP_STRUCT__entry(
+		__field(struct drm_i915_gem_object *, obj)
+		__field(u32, offset)
+		__field(u32, size)
+		__field(bool, mappable)
+	),
 
-	TP_ARGS(obj, gtt_offset),
+	TP_fast_assign(
+		__entry->obj = obj;
+		__entry->offset = obj->gtt_space->start;
+		__entry->size = obj->gtt_space->size;
+		__entry->mappable = mappable;
+	),
+
+	TP_printk("obj=%p, offset=%08x size=%x%s",
+		  __entry->obj, __entry->offset, __entry->size,
+		  __entry->mappable ? ", mappable" : "")
+);
+
+TRACE_EVENT(i915_gem_object_unbind,
+	TP_PROTO(struct drm_i915_gem_object *obj),
+	TP_ARGS(obj),
 
 	TP_STRUCT__entry(
-		__field(struct drm_gem_object *, obj)
-		__field(u32, gtt_offset)
+		__field(struct drm_i915_gem_object *, obj)
+		__field(u32, offset)
+		__field(u32, size)
 	),
 
 	TP_fast_assign(
 		__entry->obj = obj;
-		__entry->gtt_offset = gtt_offset;
+		__entry->offset = obj->gtt_space->start;
+		__entry->size = obj->gtt_space->size;
 	),
 
-	TP_printk("obj=%p, gtt_offset=%08x",
-		  __entry->obj, __entry->gtt_offset)
+	TP_printk("obj=%p, offset=%08x size=%x",
+		  __entry->obj, __entry->offset, __entry->size)
 );
 
 TRACE_EVENT(i915_gem_object_change_domain,
-
-	TP_PROTO(struct drm_gem_object *obj, uint32_t old_read_domains, uint32_t old_write_domain),
-
-	TP_ARGS(obj, old_read_domains, old_write_domain),
+	TP_PROTO(struct drm_i915_gem_object *obj, u32 old_read, u32 old_write),
+	TP_ARGS(obj, old_read, old_write),
 
 	TP_STRUCT__entry(
-		__field(struct drm_gem_object *, obj)
+		__field(struct drm_i915_gem_object *, obj)
 		__field(u32, read_domains)
 		__field(u32, write_domain)
 	),
 
 	TP_fast_assign(
 		__entry->obj = obj;
-		__entry->read_domains = obj->read_domains | (old_read_domains << 16);
-		__entry->write_domain = obj->write_domain | (old_write_domain << 16);
+		__entry->read_domains = obj->base.read_domains | (old_read << 16);
+		__entry->write_domain = obj->base.write_domain | (old_write << 16);
 	),
 
-	TP_printk("obj=%p, read=%04x, write=%04x",
+	TP_printk("obj=%p, read=%02x=>%02x, write=%02x=>%02x",
 		  __entry->obj,
-		  __entry->read_domains, __entry->write_domain)
+		  __entry->read_domains >> 16,
+		  __entry->read_domains & 0xffff,
+		  __entry->write_domain >> 16,
+		  __entry->write_domain & 0xffff)
 );
 
-TRACE_EVENT(i915_gem_object_get_fence,
+TRACE_EVENT(i915_gem_object_pwrite,
+	TP_PROTO(struct drm_i915_gem_object *obj, u32 offset, u32 len),
+	TP_ARGS(obj, offset, len),
 
-	TP_PROTO(struct drm_gem_object *obj, int fence, int tiling_mode),
+	TP_STRUCT__entry(
+		__field(struct drm_i915_gem_object *, obj)
+		__field(u32, offset)
+		__field(u32, len)
+	),
 
-	TP_ARGS(obj, fence, tiling_mode),
+	TP_fast_assign(
+		__entry->obj = obj;
+		__entry->offset = offset;
+		__entry->len = len;
+	),
+
+	TP_printk("obj=%p, offset=%u, len=%u",
+		  __entry->obj, __entry->offset, __entry->len)
+);
+
+TRACE_EVENT(i915_gem_object_pread,
+	TP_PROTO(struct drm_i915_gem_object *obj, u32 offset, u32 len),
+	TP_ARGS(obj, offset, len),
 
 	TP_STRUCT__entry(
-		__field(struct drm_gem_object *, obj)
-		__field(int, fence)
-		__field(int, tiling_mode)
+		__field(struct drm_i915_gem_object *, obj)
+		__field(u32, offset)
+		__field(u32, len)
 	),
 
 	TP_fast_assign(
 		__entry->obj = obj;
-		__entry->fence = fence;
-		__entry->tiling_mode = tiling_mode;
+		__entry->offset = offset;
+		__entry->len = len;
 	),
 
-	TP_printk("obj=%p, fence=%d, tiling=%d",
-		  __entry->obj, __entry->fence, __entry->tiling_mode)
+	TP_printk("obj=%p, offset=%u, len=%u",
+		  __entry->obj, __entry->offset, __entry->len)
 );
 
-DECLARE_EVENT_CLASS(i915_gem_object,
+TRACE_EVENT(i915_gem_object_fault,
+	TP_PROTO(struct drm_i915_gem_object *obj, u32 index, bool gtt, bool write),
+	TP_ARGS(obj, index, gtt, write),
+
+	TP_STRUCT__entry(
+		__field(struct drm_i915_gem_object *, obj)
+		__field(u32, index)
+		__field(bool, gtt)
+		__field(bool, write)
+	),
+
+	TP_fast_assign(
+		__entry->obj = obj;
+		__entry->index = index;
+		__entry->gtt = gtt;
+		__entry->write = write;
+	),
 
-	TP_PROTO(struct drm_gem_object *obj),
+	TP_printk("obj=%p, %s index=%u %s",
+		  __entry->obj,
+		  __entry->gtt ? "GTT" : "CPU",
+		  __entry->index,
+		  __entry->write ? ", writable" : "")
+);
 
+DECLARE_EVENT_CLASS(i915_gem_object,
+	TP_PROTO(struct drm_i915_gem_object *obj),
 	TP_ARGS(obj),
 
 	TP_STRUCT__entry(
-		__field(struct drm_gem_object *, obj)
+		__field(struct drm_i915_gem_object *, obj)
 	),
 
 	TP_fast_assign(
@@ -116,160 +181,181 @@ DECLARE_EVENT_CLASS(i915_gem_object,
 );
 
 DEFINE_EVENT(i915_gem_object, i915_gem_object_clflush,
-
-	TP_PROTO(struct drm_gem_object *obj),
-
-	TP_ARGS(obj)
+	TP_PROTO(struct drm_i915_gem_object *obj),
+	TP_ARGS(obj)
 );
 
-DEFINE_EVENT(i915_gem_object, i915_gem_object_unbind,
-
-	TP_PROTO(struct drm_gem_object *obj),
-
+DEFINE_EVENT(i915_gem_object, i915_gem_object_destroy,
+	TP_PROTO(struct drm_i915_gem_object *obj),
 	TP_ARGS(obj)
 );
 
-DEFINE_EVENT(i915_gem_object, i915_gem_object_destroy,
+TRACE_EVENT(i915_gem_evict,
+	TP_PROTO(struct drm_device *dev, u32 size, u32 align, bool mappable),
+	TP_ARGS(dev, size, align, mappable),
 
-	TP_PROTO(struct drm_gem_object *obj),
+	TP_STRUCT__entry(
+		__field(u32, dev)
+		__field(u32, size)
+		__field(u32, align)
+		__field(bool, mappable)
+	),
 
-	TP_ARGS(obj)
+	TP_fast_assign(
+		__entry->dev = dev->primary->index;
+		__entry->size = size;
+		__entry->align = align;
+		__entry->mappable = mappable;
+	),
+
+	TP_printk("dev=%d, size=%d, align=%d %s",
+		  __entry->dev, __entry->size, __entry->align,
+		  __entry->mappable ? ", mappable" : "")
 );
 
-/* batch tracing */
+TRACE_EVENT(i915_gem_evict_everything,
+	TP_PROTO(struct drm_device *dev, bool purgeable),
+	TP_ARGS(dev, purgeable),
 
-TRACE_EVENT(i915_gem_request_submit,
+	TP_STRUCT__entry(
+		__field(u32, dev)
+		__field(bool, purgeable)
+	),
+
+	TP_fast_assign(
+		__entry->dev = dev->primary->index;
+		__entry->purgeable = purgeable;
+	),
 
-	TP_PROTO(struct drm_device *dev, u32 seqno),
+	TP_printk("dev=%d%s",
+		  __entry->dev,
+		  __entry->purgeable ? ", purgeable only" : "")
+);
 
-	TP_ARGS(dev, seqno),
+TRACE_EVENT(i915_gem_ring_dispatch,
+	TP_PROTO(struct intel_ring_buffer *ring, u32 seqno),
+	TP_ARGS(ring, seqno),
 
 	TP_STRUCT__entry(
 		__field(u32, dev)
+		__field(u32, ring)
 		__field(u32, seqno)
 	),
 
 	TP_fast_assign(
-		__entry->dev = dev->primary->index;
+		__entry->dev = ring->dev->primary->index;
+		__entry->ring = ring->id;
 		__entry->seqno = seqno;
-		i915_trace_irq_get(dev, seqno);
+		i915_trace_irq_get(ring, seqno);
 	),
 
-	TP_printk("dev=%u, seqno=%u", __entry->dev, __entry->seqno)
+	TP_printk("dev=%u, ring=%u, seqno=%u",
+		  __entry->dev, __entry->ring, __entry->seqno)
 );
 
-TRACE_EVENT(i915_gem_request_flush,
-
-	TP_PROTO(struct drm_device *dev, u32 seqno,
-		 u32 flush_domains, u32 invalidate_domains),
-
-	TP_ARGS(dev, seqno, flush_domains, invalidate_domains),
+TRACE_EVENT(i915_gem_ring_flush,
+	TP_PROTO(struct intel_ring_buffer *ring, u32 invalidate, u32 flush),
+	TP_ARGS(ring, invalidate, flush),
 
 	TP_STRUCT__entry(
 		__field(u32, dev)
-		__field(u32, seqno)
-		__field(u32, flush_domains)
-		__field(u32, invalidate_domains)
+		__field(u32, ring)
+		__field(u32, invalidate)
+		__field(u32, flush)
 	),
 
 	TP_fast_assign(
-		__entry->dev = dev->primary->index;
-		__entry->seqno = seqno;
-		__entry->flush_domains = flush_domains;
-		__entry->invalidate_domains = invalidate_domains;
+		__entry->dev = ring->dev->primary->index;
+		__entry->ring = ring->id;
+		__entry->invalidate = invalidate;
+		__entry->flush = flush;
 	),
 
-	TP_printk("dev=%u, seqno=%u, flush=%04x, invalidate=%04x",
-		  __entry->dev, __entry->seqno,
-		  __entry->flush_domains, __entry->invalidate_domains)
+	TP_printk("dev=%u, ring=%x, invalidate=%04x, flush=%04x",
+		  __entry->dev, __entry->ring,
+		  __entry->invalidate, __entry->flush)
 );
 
 DECLARE_EVENT_CLASS(i915_gem_request,
-
-	TP_PROTO(struct drm_device *dev, u32 seqno),
-
-	TP_ARGS(dev, seqno),
+	TP_PROTO(struct intel_ring_buffer *ring, u32 seqno),
+	TP_ARGS(ring, seqno),
 
 	TP_STRUCT__entry(
 		__field(u32, dev)
+		__field(u32, ring)
 		__field(u32, seqno)
 	),
 
 	TP_fast_assign(
-		__entry->dev = dev->primary->index;
+		__entry->dev = ring->dev->primary->index;
+		__entry->ring = ring->id;
 		__entry->seqno = seqno;
 	),
 
-	TP_printk("dev=%u, seqno=%u", __entry->dev, __entry->seqno)
+	TP_printk("dev=%u, ring=%u, seqno=%u",
+		  __entry->dev, __entry->ring, __entry->seqno)
 );
 
-DEFINE_EVENT(i915_gem_request, i915_gem_request_complete,
-
-	TP_PROTO(struct drm_device *dev, u32 seqno),
+DEFINE_EVENT(i915_gem_request, i915_gem_request_add,
+	TP_PROTO(struct intel_ring_buffer *ring, u32 seqno),
+	TP_ARGS(ring, seqno)
+);
 
-	TP_ARGS(dev, seqno)
+DEFINE_EVENT(i915_gem_request, i915_gem_request_complete,
+	TP_PROTO(struct intel_ring_buffer *ring, u32 seqno),
+	TP_ARGS(ring, seqno)
 );
 
 DEFINE_EVENT(i915_gem_request, i915_gem_request_retire,
-
-	TP_PROTO(struct drm_device *dev, u32 seqno),
-
-	TP_ARGS(dev, seqno)
+	TP_PROTO(struct intel_ring_buffer *ring, u32 seqno),
+	TP_ARGS(ring, seqno)
 );
 
 DEFINE_EVENT(i915_gem_request, i915_gem_request_wait_begin,
-
-	TP_PROTO(struct drm_device *dev, u32 seqno),
-
-	TP_ARGS(dev, seqno)
+	TP_PROTO(struct intel_ring_buffer *ring, u32 seqno),
+	TP_ARGS(ring, seqno)
 );
 
 DEFINE_EVENT(i915_gem_request, i915_gem_request_wait_end,
-
-	TP_PROTO(struct drm_device *dev, u32 seqno),
-
-	TP_ARGS(dev, seqno)
+	TP_PROTO(struct intel_ring_buffer *ring, u32 seqno),
+	TP_ARGS(ring, seqno)
 );
 
 DECLARE_EVENT_CLASS(i915_ring,
-
-	TP_PROTO(struct drm_device *dev),
-
-	TP_ARGS(dev),
+	TP_PROTO(struct intel_ring_buffer *ring),
+	TP_ARGS(ring),
 
 	TP_STRUCT__entry(
 		__field(u32, dev)
+		__field(u32, ring)
 	),
 
 	TP_fast_assign(
-		__entry->dev = dev->primary->index;
+		__entry->dev = ring->dev->primary->index;
+		__entry->ring = ring->id;
 	),
 
-	TP_printk("dev=%u", __entry->dev)
+	TP_printk("dev=%u, ring=%u", __entry->dev, __entry->ring)
 );
 
 DEFINE_EVENT(i915_ring, i915_ring_wait_begin,
-
-	TP_PROTO(struct drm_device *dev),
-
-	TP_ARGS(dev)
+	TP_PROTO(struct intel_ring_buffer *ring),
+	TP_ARGS(ring)
 );
 
 DEFINE_EVENT(i915_ring, i915_ring_wait_end,
-
-	TP_PROTO(struct drm_device *dev),
-
-	TP_ARGS(dev)
+	TP_PROTO(struct intel_ring_buffer *ring),
+	TP_ARGS(ring)
 );
 
 TRACE_EVENT(i915_flip_request,
-	TP_PROTO(int plane, struct drm_gem_object *obj),
+	TP_PROTO(int plane, struct drm_i915_gem_object *obj),
 
 	TP_ARGS(plane, obj),
 
 	TP_STRUCT__entry(
 		__field(int, plane)
-		__field(struct drm_gem_object *, obj)
+		__field(struct drm_i915_gem_object *, obj)
 	),
 
 	TP_fast_assign(
@@ -281,13 +367,13 @@ TRACE_EVENT(i915_flip_request,
 );
 
 TRACE_EVENT(i915_flip_complete,
-	TP_PROTO(int plane, struct drm_gem_object *obj),
+	TP_PROTO(int plane, struct drm_i915_gem_object *obj),
 
 	TP_ARGS(plane, obj),
 
 	TP_STRUCT__entry(
 		__field(int, plane)
-		__field(struct drm_gem_object *, obj)
+		__field(struct drm_i915_gem_object *, obj)
 	),
 
 	TP_fast_assign(
@@ -298,6 +384,32 @@ TRACE_EVENT(i915_flip_complete,
 	TP_printk("plane=%d, obj=%p", __entry->plane, __entry->obj)
 );
 
+TRACE_EVENT(i915_reg_rw,
+	TP_PROTO(bool write, u32 reg, u64 val, int len),
+
+	TP_ARGS(write, reg, val, len),
+
+	TP_STRUCT__entry(
+		__field(u64, val)
+		__field(u32, reg)
+		__field(u16, write)
+		__field(u16, len)
+	),
+
+	TP_fast_assign(
+		__entry->val = (u64)val;
+		__entry->reg = reg;
+		__entry->write = write;
+		__entry->len = len;
+	),
+
+	TP_printk("%s reg=0x%x, len=%d, val=(0x%x, 0x%x)",
+		  __entry->write ? "write" : "read",
+		  __entry->reg, __entry->len,
+		  (u32)(__entry->val & 0xffffffff),
+		  (u32)(__entry->val >> 32))
+);
+
 #endif /* _I915_TRACE_H_ */
 
 /* This part must be outside protection */
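A brief usage note, outside the patch itself: each TRACE_EVENT(name, ...) above expands into a static inline trace_name() helper whose signature is the TP_PROTO, and DEFINE_EVENT instances reuse the TP_STRUCT__entry/TP_fast_assign/TP_printk layout of their DECLARE_EVENT_CLASS, which is why the request and ring events need only two lines each. A minimal sketch of a call site against the new prototypes; example_bind() is a hypothetical helper for illustration, not a function in this patch:

/* In exactly one .c file, CREATE_TRACE_POINTS must be defined before the
 * header is included so the tracepoint bodies are emitted (the i915 driver
 * does this in i915_trace_points.c); all other users include the header
 * without it.
 */
#define CREATE_TRACE_POINTS
#include "i915_trace.h"

/* Hypothetical call site, for illustration only. */
static void example_bind(struct drm_i915_gem_object *obj, bool mappable)
{
	/* ... the actual GTT binding work would happen here ... */

	/* Fires the i915_gem_object_bind event defined above; this compiles
	 * to a near no-op unless the event is enabled at run time, e.g. via
	 * /sys/kernel/debug/tracing/events/i915/i915_gem_object_bind/enable.
	 */
	trace_i915_gem_object_bind(obj, mappable);
}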