author    David S. Miller <davem@davemloft.net>    2011-01-24 16:17:06 -0500
committer David S. Miller <davem@davemloft.net>    2011-01-24 16:17:06 -0500
commit    e92427b289d252cfbd4cb5282d92f4ce1a5bb1fb (patch)
tree      6d30e5e7b7f8e9aaa51d43b7128ac56860fa03bb /drivers/gpu/drm/i915/i915_debugfs.c
parent    c506653d35249bb4738bb139c24362e1ae724bc1 (diff)
parent    ec30f343d61391ab23705e50a525da1d55395780 (diff)
Merge branch 'master' of master.kernel.org:/pub/scm/linux/kernel/git/torvalds/linux-2.6
Diffstat (limited to 'drivers/gpu/drm/i915/i915_debugfs.c')
-rw-r--r--  drivers/gpu/drm/i915/i915_debugfs.c | 548
1 file changed, 399 insertions(+), 149 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 1f4f3ceb63c7..3601466c5502 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -32,6 +32,7 @@
 #include "drmP.h"
 #include "drm.h"
 #include "intel_drv.h"
+#include "intel_ringbuffer.h"
 #include "i915_drm.h"
 #include "i915_drv.h"
 
@@ -72,7 +73,6 @@ static int i915_capabilities(struct seq_file *m, void *data)
 	B(is_broadwater);
 	B(is_crestline);
 	B(has_fbc);
-	B(has_rc6);
 	B(has_pipe_cxsr);
 	B(has_hotplug);
 	B(cursor_needs_physical);
@@ -86,19 +86,19 @@ static int i915_capabilities(struct seq_file *m, void *data)
 	return 0;
 }
 
-static const char *get_pin_flag(struct drm_i915_gem_object *obj_priv)
+static const char *get_pin_flag(struct drm_i915_gem_object *obj)
 {
-	if (obj_priv->user_pin_count > 0)
+	if (obj->user_pin_count > 0)
 		return "P";
-	else if (obj_priv->pin_count > 0)
+	else if (obj->pin_count > 0)
 		return "p";
 	else
 		return " ";
 }
 
-static const char *get_tiling_flag(struct drm_i915_gem_object *obj_priv)
+static const char *get_tiling_flag(struct drm_i915_gem_object *obj)
 {
-	switch (obj_priv->tiling_mode) {
+	switch (obj->tiling_mode) {
 	default:
 	case I915_TILING_NONE: return " ";
 	case I915_TILING_X: return "X";
@@ -106,10 +106,19 @@ static const char *get_tiling_flag(struct drm_i915_gem_object *obj_priv)
 	}
 }
 
+static const char *agp_type_str(int type)
+{
+	switch (type) {
+	case 0: return " uncached";
+	case 1: return " snooped";
+	default: return "";
+	}
+}
+
 static void
 describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
 {
-	seq_printf(m, "%p: %s%s %8zd %08x %08x %d%s%s",
+	seq_printf(m, "%p: %s%s %8zd %04x %04x %d %d%s%s%s",
 		   &obj->base,
 		   get_pin_flag(obj),
 		   get_tiling_flag(obj),
@@ -117,6 +126,8 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
 		   obj->base.read_domains,
 		   obj->base.write_domain,
 		   obj->last_rendering_seqno,
+		   obj->last_fenced_seqno,
+		   agp_type_str(obj->agp_type == AGP_USER_CACHED_MEMORY),
 		   obj->dirty ? " dirty" : "",
 		   obj->madv == I915_MADV_DONTNEED ? " purgeable" : "");
 	if (obj->base.name)
@@ -124,7 +135,17 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
 	if (obj->fence_reg != I915_FENCE_REG_NONE)
 		seq_printf(m, " (fence: %d)", obj->fence_reg);
 	if (obj->gtt_space != NULL)
-		seq_printf(m, " (gtt_offset: %08x)", obj->gtt_offset);
+		seq_printf(m, " (gtt offset: %08x, size: %08x)",
+			   obj->gtt_offset, (unsigned int)obj->gtt_space->size);
+	if (obj->pin_mappable || obj->fault_mappable) {
+		char s[3], *t = s;
+		if (obj->pin_mappable)
+			*t++ = 'p';
+		if (obj->fault_mappable)
+			*t++ = 'f';
+		*t = '\0';
+		seq_printf(m, " (%s mappable)", s);
+	}
 	if (obj->ring != NULL)
 		seq_printf(m, " (%s)", obj->ring->name);
 }
@@ -136,7 +157,7 @@ static int i915_gem_object_list_info(struct seq_file *m, void *data)
 	struct list_head *head;
 	struct drm_device *dev = node->minor->dev;
 	drm_i915_private_t *dev_priv = dev->dev_private;
-	struct drm_i915_gem_object *obj_priv;
+	struct drm_i915_gem_object *obj;
 	size_t total_obj_size, total_gtt_size;
 	int count, ret;
 
@@ -171,12 +192,12 @@ static int i915_gem_object_list_info(struct seq_file *m, void *data)
 	}
 
 	total_obj_size = total_gtt_size = count = 0;
-	list_for_each_entry(obj_priv, head, mm_list) {
+	list_for_each_entry(obj, head, mm_list) {
 		seq_printf(m, " ");
-		describe_obj(m, obj_priv);
+		describe_obj(m, obj);
 		seq_printf(m, "\n");
-		total_obj_size += obj_priv->base.size;
-		total_gtt_size += obj_priv->gtt_space->size;
+		total_obj_size += obj->base.size;
+		total_gtt_size += obj->gtt_space->size;
 		count++;
 	}
 	mutex_unlock(&dev->struct_mutex);
@@ -186,30 +207,116 @@ static int i915_gem_object_list_info(struct seq_file *m, void *data)
 	return 0;
 }
 
+#define count_objects(list, member) do { \
+	list_for_each_entry(obj, list, member) { \
+		size += obj->gtt_space->size; \
+		++count; \
+		if (obj->map_and_fenceable) { \
+			mappable_size += obj->gtt_space->size; \
+			++mappable_count; \
+		} \
+	} \
+} while(0)
+
 static int i915_gem_object_info(struct seq_file *m, void* data)
 {
 	struct drm_info_node *node = (struct drm_info_node *) m->private;
 	struct drm_device *dev = node->minor->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
+	u32 count, mappable_count;
+	size_t size, mappable_size;
+	struct drm_i915_gem_object *obj;
 	int ret;
 
 	ret = mutex_lock_interruptible(&dev->struct_mutex);
 	if (ret)
 		return ret;
 
-	seq_printf(m, "%u objects\n", dev_priv->mm.object_count);
-	seq_printf(m, "%zu object bytes\n", dev_priv->mm.object_memory);
-	seq_printf(m, "%u pinned\n", dev_priv->mm.pin_count);
-	seq_printf(m, "%zu pin bytes\n", dev_priv->mm.pin_memory);
-	seq_printf(m, "%u objects in gtt\n", dev_priv->mm.gtt_count);
-	seq_printf(m, "%zu gtt bytes\n", dev_priv->mm.gtt_memory);
-	seq_printf(m, "%zu gtt total\n", dev_priv->mm.gtt_total);
+	seq_printf(m, "%u objects, %zu bytes\n",
+		   dev_priv->mm.object_count,
+		   dev_priv->mm.object_memory);
+
+	size = count = mappable_size = mappable_count = 0;
+	count_objects(&dev_priv->mm.gtt_list, gtt_list);
+	seq_printf(m, "%u [%u] objects, %zu [%zu] bytes in gtt\n",
+		   count, mappable_count, size, mappable_size);
+
+	size = count = mappable_size = mappable_count = 0;
+	count_objects(&dev_priv->mm.active_list, mm_list);
+	count_objects(&dev_priv->mm.flushing_list, mm_list);
+	seq_printf(m, " %u [%u] active objects, %zu [%zu] bytes\n",
+		   count, mappable_count, size, mappable_size);
+
+	size = count = mappable_size = mappable_count = 0;
+	count_objects(&dev_priv->mm.pinned_list, mm_list);
+	seq_printf(m, " %u [%u] pinned objects, %zu [%zu] bytes\n",
+		   count, mappable_count, size, mappable_size);
+
+	size = count = mappable_size = mappable_count = 0;
+	count_objects(&dev_priv->mm.inactive_list, mm_list);
+	seq_printf(m, " %u [%u] inactive objects, %zu [%zu] bytes\n",
+		   count, mappable_count, size, mappable_size);
+
+	size = count = mappable_size = mappable_count = 0;
+	count_objects(&dev_priv->mm.deferred_free_list, mm_list);
+	seq_printf(m, " %u [%u] freed objects, %zu [%zu] bytes\n",
+		   count, mappable_count, size, mappable_size);
+
+	size = count = mappable_size = mappable_count = 0;
+	list_for_each_entry(obj, &dev_priv->mm.gtt_list, gtt_list) {
+		if (obj->fault_mappable) {
+			size += obj->gtt_space->size;
+			++count;
+		}
+		if (obj->pin_mappable) {
+			mappable_size += obj->gtt_space->size;
+			++mappable_count;
+		}
+	}
+	seq_printf(m, "%u pinned mappable objects, %zu bytes\n",
+		   mappable_count, mappable_size);
+	seq_printf(m, "%u fault mappable objects, %zu bytes\n",
+		   count, size);
+
+	seq_printf(m, "%zu [%zu] gtt total\n",
+		   dev_priv->mm.gtt_total, dev_priv->mm.mappable_gtt_total);
 
 	mutex_unlock(&dev->struct_mutex);
 
 	return 0;
 }
 
+static int i915_gem_gtt_info(struct seq_file *m, void* data)
+{
+	struct drm_info_node *node = (struct drm_info_node *) m->private;
+	struct drm_device *dev = node->minor->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_gem_object *obj;
+	size_t total_obj_size, total_gtt_size;
+	int count, ret;
+
+	ret = mutex_lock_interruptible(&dev->struct_mutex);
+	if (ret)
+		return ret;
+
+	total_obj_size = total_gtt_size = count = 0;
+	list_for_each_entry(obj, &dev_priv->mm.gtt_list, gtt_list) {
+		seq_printf(m, " ");
+		describe_obj(m, obj);
+		seq_printf(m, "\n");
+		total_obj_size += obj->base.size;
+		total_gtt_size += obj->gtt_space->size;
+		count++;
+	}
+
+	mutex_unlock(&dev->struct_mutex);
+
+	seq_printf(m, "Total %d objects, %zu bytes, %zu GTT size\n",
+		   count, total_obj_size, total_gtt_size);
+
+	return 0;
+}
+
 
 static int i915_gem_pageflip_info(struct seq_file *m, void *data)
 {
@@ -243,14 +350,14 @@ static int i915_gem_pageflip_info(struct seq_file *m, void *data)
 			seq_printf(m, "%d prepares\n", work->pending);
 
 			if (work->old_fb_obj) {
-				struct drm_i915_gem_object *obj_priv = to_intel_bo(work->old_fb_obj);
-				if(obj_priv)
-					seq_printf(m, "Old framebuffer gtt_offset 0x%08x\n", obj_priv->gtt_offset );
+				struct drm_i915_gem_object *obj = work->old_fb_obj;
+				if (obj)
+					seq_printf(m, "Old framebuffer gtt_offset 0x%08x\n", obj->gtt_offset);
 			}
 			if (work->pending_flip_obj) {
-				struct drm_i915_gem_object *obj_priv = to_intel_bo(work->pending_flip_obj);
-				if(obj_priv)
-					seq_printf(m, "New framebuffer gtt_offset 0x%08x\n", obj_priv->gtt_offset );
+				struct drm_i915_gem_object *obj = work->pending_flip_obj;
+				if (obj)
+					seq_printf(m, "New framebuffer gtt_offset 0x%08x\n", obj->gtt_offset);
 			}
 		}
 		spin_unlock_irqrestore(&dev->event_lock, flags);
@@ -265,44 +372,80 @@ static int i915_gem_request_info(struct seq_file *m, void *data)
 	struct drm_device *dev = node->minor->dev;
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	struct drm_i915_gem_request *gem_request;
-	int ret;
+	int ret, count;
 
 	ret = mutex_lock_interruptible(&dev->struct_mutex);
 	if (ret)
 		return ret;
 
-	seq_printf(m, "Request:\n");
-	list_for_each_entry(gem_request, &dev_priv->render_ring.request_list,
-			    list) {
-		seq_printf(m, " %d @ %d\n",
-			   gem_request->seqno,
-			   (int) (jiffies - gem_request->emitted_jiffies));
+	count = 0;
+	if (!list_empty(&dev_priv->ring[RCS].request_list)) {
+		seq_printf(m, "Render requests:\n");
+		list_for_each_entry(gem_request,
+				    &dev_priv->ring[RCS].request_list,
+				    list) {
+			seq_printf(m, " %d @ %d\n",
+				   gem_request->seqno,
+				   (int) (jiffies - gem_request->emitted_jiffies));
+		}
+		count++;
+	}
+	if (!list_empty(&dev_priv->ring[VCS].request_list)) {
+		seq_printf(m, "BSD requests:\n");
+		list_for_each_entry(gem_request,
+				    &dev_priv->ring[VCS].request_list,
+				    list) {
+			seq_printf(m, " %d @ %d\n",
+				   gem_request->seqno,
+				   (int) (jiffies - gem_request->emitted_jiffies));
+		}
+		count++;
+	}
+	if (!list_empty(&dev_priv->ring[BCS].request_list)) {
+		seq_printf(m, "BLT requests:\n");
+		list_for_each_entry(gem_request,
+				    &dev_priv->ring[BCS].request_list,
+				    list) {
+			seq_printf(m, " %d @ %d\n",
+				   gem_request->seqno,
+				   (int) (jiffies - gem_request->emitted_jiffies));
+		}
+		count++;
 	}
 	mutex_unlock(&dev->struct_mutex);
 
+	if (count == 0)
+		seq_printf(m, "No requests\n");
+
 	return 0;
 }
 
+static void i915_ring_seqno_info(struct seq_file *m,
+				 struct intel_ring_buffer *ring)
+{
+	if (ring->get_seqno) {
+		seq_printf(m, "Current sequence (%s): %d\n",
+			   ring->name, ring->get_seqno(ring));
+		seq_printf(m, "Waiter sequence (%s): %d\n",
+			   ring->name, ring->waiting_seqno);
+		seq_printf(m, "IRQ sequence (%s): %d\n",
+			   ring->name, ring->irq_seqno);
+	}
+}
+
 static int i915_gem_seqno_info(struct seq_file *m, void *data)
 {
 	struct drm_info_node *node = (struct drm_info_node *) m->private;
 	struct drm_device *dev = node->minor->dev;
 	drm_i915_private_t *dev_priv = dev->dev_private;
-	int ret;
+	int ret, i;
 
 	ret = mutex_lock_interruptible(&dev->struct_mutex);
 	if (ret)
 		return ret;
 
-	if (dev_priv->render_ring.status_page.page_addr != NULL) {
-		seq_printf(m, "Current sequence: %d\n",
-			   dev_priv->render_ring.get_seqno(dev, &dev_priv->render_ring));
-	} else {
-		seq_printf(m, "Current sequence: hws uninitialized\n");
-	}
-	seq_printf(m, "Waiter sequence: %d\n",
-		   dev_priv->mm.waiting_gem_seqno);
-	seq_printf(m, "IRQ sequence: %d\n", dev_priv->mm.irq_gem_seqno);
+	for (i = 0; i < I915_NUM_RINGS; i++)
+		i915_ring_seqno_info(m, &dev_priv->ring[i]);
 
 	mutex_unlock(&dev->struct_mutex);
 
@@ -315,7 +458,7 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
 	struct drm_info_node *node = (struct drm_info_node *) m->private;
 	struct drm_device *dev = node->minor->dev;
 	drm_i915_private_t *dev_priv = dev->dev_private;
-	int ret;
+	int ret, i;
 
 	ret = mutex_lock_interruptible(&dev->struct_mutex);
 	if (ret)
@@ -354,16 +497,14 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
 	}
 	seq_printf(m, "Interrupts received: %d\n",
 		   atomic_read(&dev_priv->irq_received));
-	if (dev_priv->render_ring.status_page.page_addr != NULL) {
-		seq_printf(m, "Current sequence: %d\n",
-			   dev_priv->render_ring.get_seqno(dev, &dev_priv->render_ring));
-	} else {
-		seq_printf(m, "Current sequence: hws uninitialized\n");
+	for (i = 0; i < I915_NUM_RINGS; i++) {
+		if (IS_GEN6(dev)) {
+			seq_printf(m, "Graphics Interrupt mask (%s): %08x\n",
+				   dev_priv->ring[i].name,
+				   I915_READ_IMR(&dev_priv->ring[i]));
+		}
+		i915_ring_seqno_info(m, &dev_priv->ring[i]);
 	}
-	seq_printf(m, "Waiter sequence: %d\n",
-		   dev_priv->mm.waiting_gem_seqno);
-	seq_printf(m, "IRQ sequence: %d\n",
-		   dev_priv->mm.irq_gem_seqno);
 	mutex_unlock(&dev->struct_mutex);
 
 	return 0;
@@ -383,29 +524,17 @@ static int i915_gem_fence_regs_info(struct seq_file *m, void *data)
 	seq_printf(m, "Reserved fences = %d\n", dev_priv->fence_reg_start);
 	seq_printf(m, "Total fences = %d\n", dev_priv->num_fence_regs);
 	for (i = 0; i < dev_priv->num_fence_regs; i++) {
-		struct drm_gem_object *obj = dev_priv->fence_regs[i].obj;
+		struct drm_i915_gem_object *obj = dev_priv->fence_regs[i].obj;
 
-		if (obj == NULL) {
-			seq_printf(m, "Fenced object[%2d] = unused\n", i);
-		} else {
-			struct drm_i915_gem_object *obj_priv;
-
-			obj_priv = to_intel_bo(obj);
-			seq_printf(m, "Fenced object[%2d] = %p: %s "
-				   "%08x %08zx %08x %s %08x %08x %d",
-				   i, obj, get_pin_flag(obj_priv),
-				   obj_priv->gtt_offset,
-				   obj->size, obj_priv->stride,
-				   get_tiling_flag(obj_priv),
-				   obj->read_domains, obj->write_domain,
-				   obj_priv->last_rendering_seqno);
-			if (obj->name)
-				seq_printf(m, " (name: %d)", obj->name);
-			seq_printf(m, "\n");
-		}
+		seq_printf(m, "Fenced object[%2d] = ", i);
+		if (obj == NULL)
+			seq_printf(m, "unused");
+		else
+			describe_obj(m, obj);
+		seq_printf(m, "\n");
 	}
-	mutex_unlock(&dev->struct_mutex);
 
+	mutex_unlock(&dev->struct_mutex);
 	return 0;
 }
 
@@ -414,10 +543,12 @@ static int i915_hws_info(struct seq_file *m, void *data)
 	struct drm_info_node *node = (struct drm_info_node *) m->private;
 	struct drm_device *dev = node->minor->dev;
 	drm_i915_private_t *dev_priv = dev->dev_private;
-	int i;
+	struct intel_ring_buffer *ring;
 	volatile u32 *hws;
+	int i;
 
-	hws = (volatile u32 *)dev_priv->render_ring.status_page.page_addr;
+	ring = &dev_priv->ring[(uintptr_t)node->info_ent->data];
+	hws = (volatile u32 *)ring->status_page.page_addr;
 	if (hws == NULL)
 		return 0;
 
@@ -431,14 +562,14 @@ static int i915_hws_info(struct seq_file *m, void *data)
 
 static void i915_dump_object(struct seq_file *m,
 			     struct io_mapping *mapping,
-			     struct drm_i915_gem_object *obj_priv)
+			     struct drm_i915_gem_object *obj)
 {
 	int page, page_count, i;
 
-	page_count = obj_priv->base.size / PAGE_SIZE;
+	page_count = obj->base.size / PAGE_SIZE;
 	for (page = 0; page < page_count; page++) {
 		u32 *mem = io_mapping_map_wc(mapping,
-					     obj_priv->gtt_offset + page * PAGE_SIZE);
+					     obj->gtt_offset + page * PAGE_SIZE);
 		for (i = 0; i < PAGE_SIZE; i += 4)
 			seq_printf(m, "%08x : %08x\n", i, mem[i / 4]);
 		io_mapping_unmap(mem);
@@ -450,25 +581,21 @@ static int i915_batchbuffer_info(struct seq_file *m, void *data)
 	struct drm_info_node *node = (struct drm_info_node *) m->private;
 	struct drm_device *dev = node->minor->dev;
 	drm_i915_private_t *dev_priv = dev->dev_private;
-	struct drm_gem_object *obj;
-	struct drm_i915_gem_object *obj_priv;
+	struct drm_i915_gem_object *obj;
 	int ret;
 
 	ret = mutex_lock_interruptible(&dev->struct_mutex);
 	if (ret)
 		return ret;
 
-	list_for_each_entry(obj_priv, &dev_priv->mm.active_list, mm_list) {
-		obj = &obj_priv->base;
-		if (obj->read_domains & I915_GEM_DOMAIN_COMMAND) {
-			seq_printf(m, "--- gtt_offset = 0x%08x\n",
-				   obj_priv->gtt_offset);
-			i915_dump_object(m, dev_priv->mm.gtt_mapping, obj_priv);
+	list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) {
+		if (obj->base.read_domains & I915_GEM_DOMAIN_COMMAND) {
+			seq_printf(m, "--- gtt_offset = 0x%08x\n", obj->gtt_offset);
+			i915_dump_object(m, dev_priv->mm.gtt_mapping, obj);
 		}
 	}
 
 	mutex_unlock(&dev->struct_mutex);
-
 	return 0;
 }
 
@@ -477,19 +604,21 @@ static int i915_ringbuffer_data(struct seq_file *m, void *data)
 	struct drm_info_node *node = (struct drm_info_node *) m->private;
 	struct drm_device *dev = node->minor->dev;
 	drm_i915_private_t *dev_priv = dev->dev_private;
+	struct intel_ring_buffer *ring;
 	int ret;
 
 	ret = mutex_lock_interruptible(&dev->struct_mutex);
 	if (ret)
 		return ret;
 
-	if (!dev_priv->render_ring.gem_object) {
+	ring = &dev_priv->ring[(uintptr_t)node->info_ent->data];
+	if (!ring->obj) {
 		seq_printf(m, "No ringbuffer setup\n");
 	} else {
-		u8 *virt = dev_priv->render_ring.virtual_start;
+		u8 *virt = ring->virtual_start;
 		uint32_t off;
 
-		for (off = 0; off < dev_priv->render_ring.size; off += 4) {
+		for (off = 0; off < ring->size; off += 4) {
 			uint32_t *ptr = (uint32_t *)(virt + off);
 			seq_printf(m, "%08x : %08x\n", off, *ptr);
 		}
@@ -504,19 +633,38 @@ static int i915_ringbuffer_info(struct seq_file *m, void *data)
 	struct drm_info_node *node = (struct drm_info_node *) m->private;
 	struct drm_device *dev = node->minor->dev;
 	drm_i915_private_t *dev_priv = dev->dev_private;
-	unsigned int head, tail;
+	struct intel_ring_buffer *ring;
 
-	head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
-	tail = I915_READ(PRB0_TAIL) & TAIL_ADDR;
+	ring = &dev_priv->ring[(uintptr_t)node->info_ent->data];
+	if (ring->size == 0)
+		return 0;
 
-	seq_printf(m, "RingHead : %08x\n", head);
-	seq_printf(m, "RingTail : %08x\n", tail);
-	seq_printf(m, "RingSize : %08lx\n", dev_priv->render_ring.size);
-	seq_printf(m, "Acthd : %08x\n", I915_READ(INTEL_INFO(dev)->gen >= 4 ? ACTHD_I965 : ACTHD));
+	seq_printf(m, "Ring %s:\n", ring->name);
+	seq_printf(m, " Head : %08x\n", I915_READ_HEAD(ring) & HEAD_ADDR);
+	seq_printf(m, " Tail : %08x\n", I915_READ_TAIL(ring) & TAIL_ADDR);
+	seq_printf(m, " Size : %08x\n", ring->size);
+	seq_printf(m, " Active : %08x\n", intel_ring_get_active_head(ring));
+	seq_printf(m, " NOPID : %08x\n", I915_READ_NOPID(ring));
+	if (IS_GEN6(dev)) {
+		seq_printf(m, " Sync 0 : %08x\n", I915_READ_SYNC_0(ring));
+		seq_printf(m, " Sync 1 : %08x\n", I915_READ_SYNC_1(ring));
+	}
+	seq_printf(m, " Control : %08x\n", I915_READ_CTL(ring));
+	seq_printf(m, " Start : %08x\n", I915_READ_START(ring));
 
 	return 0;
 }
 
+static const char *ring_str(int ring)
+{
+	switch (ring) {
+	case RING_RENDER: return " render";
+	case RING_BSD: return " bsd";
+	case RING_BLT: return " blt";
+	default: return "";
+	}
+}
+
 static const char *pin_flag(int pinned)
 {
 	if (pinned > 0)
@@ -547,6 +695,37 @@ static const char *purgeable_flag(int purgeable)
 	return purgeable ? " purgeable" : "";
 }
 
+static void print_error_buffers(struct seq_file *m,
+				const char *name,
+				struct drm_i915_error_buffer *err,
+				int count)
+{
+	seq_printf(m, "%s [%d]:\n", name, count);
+
+	while (count--) {
+		seq_printf(m, " %08x %8u %04x %04x %08x%s%s%s%s%s%s",
+			   err->gtt_offset,
+			   err->size,
+			   err->read_domains,
+			   err->write_domain,
+			   err->seqno,
+			   pin_flag(err->pinned),
+			   tiling_flag(err->tiling),
+			   dirty_flag(err->dirty),
+			   purgeable_flag(err->purgeable),
+			   ring_str(err->ring),
+			   agp_type_str(err->agp_type));
+
+		if (err->name)
+			seq_printf(m, " (name: %d)", err->name);
+		if (err->fence_reg != I915_FENCE_REG_NONE)
+			seq_printf(m, " (fence: %d)", err->fence_reg);
+
+		seq_printf(m, "\n");
+		err++;
+	}
+}
+
 static int i915_error_state(struct seq_file *m, void *unused)
 {
 	struct drm_info_node *node = (struct drm_info_node *) m->private;
@@ -568,47 +747,54 @@ static int i915_error_state(struct seq_file *m, void *unused)
 		   error->time.tv_usec);
 	seq_printf(m, "PCI ID: 0x%04x\n", dev->pci_device);
 	seq_printf(m, "EIR: 0x%08x\n", error->eir);
-	seq_printf(m, " PGTBL_ER: 0x%08x\n", error->pgtbl_er);
-	seq_printf(m, " INSTPM: 0x%08x\n", error->instpm);
+	seq_printf(m, "PGTBL_ER: 0x%08x\n", error->pgtbl_er);
+	if (INTEL_INFO(dev)->gen >= 6) {
+		seq_printf(m, "ERROR: 0x%08x\n", error->error);
+		seq_printf(m, "Blitter command stream:\n");
+		seq_printf(m, " ACTHD: 0x%08x\n", error->bcs_acthd);
+		seq_printf(m, " IPEIR: 0x%08x\n", error->bcs_ipeir);
+		seq_printf(m, " IPEHR: 0x%08x\n", error->bcs_ipehr);
+		seq_printf(m, " INSTDONE: 0x%08x\n", error->bcs_instdone);
+		seq_printf(m, " seqno: 0x%08x\n", error->bcs_seqno);
+		seq_printf(m, "Video (BSD) command stream:\n");
+		seq_printf(m, " ACTHD: 0x%08x\n", error->vcs_acthd);
+		seq_printf(m, " IPEIR: 0x%08x\n", error->vcs_ipeir);
+		seq_printf(m, " IPEHR: 0x%08x\n", error->vcs_ipehr);
+		seq_printf(m, " INSTDONE: 0x%08x\n", error->vcs_instdone);
+		seq_printf(m, " seqno: 0x%08x\n", error->vcs_seqno);
+	}
+	seq_printf(m, "Render command stream:\n");
+	seq_printf(m, " ACTHD: 0x%08x\n", error->acthd);
 	seq_printf(m, " IPEIR: 0x%08x\n", error->ipeir);
 	seq_printf(m, " IPEHR: 0x%08x\n", error->ipehr);
 	seq_printf(m, " INSTDONE: 0x%08x\n", error->instdone);
-	seq_printf(m, " ACTHD: 0x%08x\n", error->acthd);
 	if (INTEL_INFO(dev)->gen >= 4) {
-		seq_printf(m, " INSTPS: 0x%08x\n", error->instps);
 		seq_printf(m, " INSTDONE1: 0x%08x\n", error->instdone1);
+		seq_printf(m, " INSTPS: 0x%08x\n", error->instps);
 	}
-	seq_printf(m, "seqno: 0x%08x\n", error->seqno);
-
-	if (error->active_bo_count) {
-		seq_printf(m, "Buffers [%d]:\n", error->active_bo_count);
-
-		for (i = 0; i < error->active_bo_count; i++) {
-			seq_printf(m, " %08x %8zd %08x %08x %08x%s%s%s%s",
-				   error->active_bo[i].gtt_offset,
-				   error->active_bo[i].size,
-				   error->active_bo[i].read_domains,
-				   error->active_bo[i].write_domain,
-				   error->active_bo[i].seqno,
-				   pin_flag(error->active_bo[i].pinned),
-				   tiling_flag(error->active_bo[i].tiling),
-				   dirty_flag(error->active_bo[i].dirty),
-				   purgeable_flag(error->active_bo[i].purgeable));
-
-			if (error->active_bo[i].name)
-				seq_printf(m, " (name: %d)", error->active_bo[i].name);
-			if (error->active_bo[i].fence_reg != I915_FENCE_REG_NONE)
-				seq_printf(m, " (fence: %d)", error->active_bo[i].fence_reg);
-
-			seq_printf(m, "\n");
-		}
-	}
+	seq_printf(m, " INSTPM: 0x%08x\n", error->instpm);
+	seq_printf(m, " seqno: 0x%08x\n", error->seqno);
+
+	for (i = 0; i < 16; i++)
+		seq_printf(m, " fence[%d] = %08llx\n", i, error->fence[i]);
+
+	if (error->active_bo)
+		print_error_buffers(m, "Active",
+				    error->active_bo,
+				    error->active_bo_count);
+
+	if (error->pinned_bo)
+		print_error_buffers(m, "Pinned",
+				    error->pinned_bo,
+				    error->pinned_bo_count);
 
 	for (i = 0; i < ARRAY_SIZE(error->batchbuffer); i++) {
 		if (error->batchbuffer[i]) {
 			struct drm_i915_error_object *obj = error->batchbuffer[i];
 
-			seq_printf(m, "--- gtt_offset = 0x%08x\n", obj->gtt_offset);
+			seq_printf(m, "%s --- gtt_offset = 0x%08x\n",
+				   dev_priv->ring[i].name,
+				   obj->gtt_offset);
 			offset = 0;
 			for (page = 0; page < obj->page_count; page++) {
 				for (elt = 0; elt < PAGE_SIZE/4; elt++) {
@@ -635,6 +821,9 @@ static int i915_error_state(struct seq_file *m, void *unused)
 	if (error->overlay)
 		intel_overlay_print_error_state(m, error->overlay);
 
+	if (error->display)
+		intel_display_print_error_state(m, dev, error->display);
+
 out:
 	spin_unlock_irqrestore(&dev_priv->error_lock, flags);
 
@@ -658,15 +847,51 @@ static int i915_cur_delayinfo(struct seq_file *m, void *unused)
 	struct drm_info_node *node = (struct drm_info_node *) m->private;
 	struct drm_device *dev = node->minor->dev;
 	drm_i915_private_t *dev_priv = dev->dev_private;
-	u16 rgvswctl = I915_READ16(MEMSWCTL);
-	u16 rgvstat = I915_READ16(MEMSTAT_ILK);
 
-	seq_printf(m, "Requested P-state: %d\n", (rgvswctl >> 8) & 0xf);
-	seq_printf(m, "Requested VID: %d\n", rgvswctl & 0x3f);
-	seq_printf(m, "Current VID: %d\n", (rgvstat & MEMSTAT_VID_MASK) >>
-		   MEMSTAT_VID_SHIFT);
-	seq_printf(m, "Current P-state: %d\n",
-		   (rgvstat & MEMSTAT_PSTATE_MASK) >> MEMSTAT_PSTATE_SHIFT);
+	if (IS_GEN5(dev)) {
+		u16 rgvswctl = I915_READ16(MEMSWCTL);
+		u16 rgvstat = I915_READ16(MEMSTAT_ILK);
+
+		seq_printf(m, "Requested P-state: %d\n", (rgvswctl >> 8) & 0xf);
+		seq_printf(m, "Requested VID: %d\n", rgvswctl & 0x3f);
+		seq_printf(m, "Current VID: %d\n", (rgvstat & MEMSTAT_VID_MASK) >>
+			   MEMSTAT_VID_SHIFT);
+		seq_printf(m, "Current P-state: %d\n",
+			   (rgvstat & MEMSTAT_PSTATE_MASK) >> MEMSTAT_PSTATE_SHIFT);
+	} else if (IS_GEN6(dev)) {
+		u32 gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
+		u32 rp_state_limits = I915_READ(GEN6_RP_STATE_LIMITS);
+		u32 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
+		int max_freq;
+
+		/* RPSTAT1 is in the GT power well */
+		__gen6_force_wake_get(dev_priv);
+
+		seq_printf(m, "GT_PERF_STATUS: 0x%08x\n", gt_perf_status);
+		seq_printf(m, "RPSTAT1: 0x%08x\n", I915_READ(GEN6_RPSTAT1));
+		seq_printf(m, "Render p-state ratio: %d\n",
+			   (gt_perf_status & 0xff00) >> 8);
+		seq_printf(m, "Render p-state VID: %d\n",
+			   gt_perf_status & 0xff);
+		seq_printf(m, "Render p-state limit: %d\n",
+			   rp_state_limits & 0xff);
+
+		max_freq = (rp_state_cap & 0xff0000) >> 16;
+		seq_printf(m, "Lowest (RPN) frequency: %dMHz\n",
+			   max_freq * 100);
+
+		max_freq = (rp_state_cap & 0xff00) >> 8;
+		seq_printf(m, "Nominal (RP1) frequency: %dMHz\n",
+			   max_freq * 100);
+
+		max_freq = rp_state_cap & 0xff;
+		seq_printf(m, "Max non-overclocked (RP0) frequency: %dMHz\n",
+			   max_freq * 100);
+
+		__gen6_force_wake_put(dev_priv);
+	} else {
+		seq_printf(m, "no P-state info available\n");
+	}
 
 	return 0;
 }
@@ -715,7 +940,7 @@ static int i915_drpc_info(struct seq_file *m, void *unused)
 	struct drm_device *dev = node->minor->dev;
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	u32 rgvmodectl = I915_READ(MEMMODECTL);
-	u32 rstdbyctl = I915_READ(MCHBAR_RENDER_STANDBY);
+	u32 rstdbyctl = I915_READ(RSTDBYCTL);
 	u16 crstandvid = I915_READ16(CRSTANDVID);
 
 	seq_printf(m, "HD boost: %s\n", (rgvmodectl & MEMMODE_BOOST_EN) ?
@@ -738,6 +963,30 @@ static int i915_drpc_info(struct seq_file *m, void *unused)
 	seq_printf(m, "RS2 VID: %d\n", ((crstandvid >> 8) & 0x3f));
 	seq_printf(m, "Render standby enabled: %s\n",
 		   (rstdbyctl & RCX_SW_EXIT) ? "no" : "yes");
+	seq_printf(m, "Current RS state: ");
+	switch (rstdbyctl & RSX_STATUS_MASK) {
+	case RSX_STATUS_ON:
+		seq_printf(m, "on\n");
+		break;
+	case RSX_STATUS_RC1:
+		seq_printf(m, "RC1\n");
+		break;
+	case RSX_STATUS_RC1E:
+		seq_printf(m, "RC1E\n");
+		break;
+	case RSX_STATUS_RS1:
+		seq_printf(m, "RS1\n");
+		break;
+	case RSX_STATUS_RS2:
+		seq_printf(m, "RS2 (RC6)\n");
+		break;
+	case RSX_STATUS_RS3:
+		seq_printf(m, "RC3 (RC6+)\n");
+		break;
+	default:
+		seq_printf(m, "unknown\n");
+		break;
+	}
 
 	return 0;
 }
@@ -794,7 +1043,7 @@ static int i915_sr_status(struct seq_file *m, void *unused)
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	bool sr_enabled = false;
 
-	if (IS_GEN5(dev))
+	if (HAS_PCH_SPLIT(dev))
 		sr_enabled = I915_READ(WM1_LP_ILK) & WM1_LP_SR_EN;
 	else if (IS_CRESTLINE(dev) || IS_I945G(dev) || IS_I945GM(dev))
 		sr_enabled = I915_READ(FW_BLC_SELF) & FW_BLC_SELF_EN;
@@ -886,7 +1135,7 @@ static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
 		   fb->base.height,
 		   fb->base.depth,
 		   fb->base.bits_per_pixel);
-	describe_obj(m, to_intel_bo(fb->obj));
+	describe_obj(m, fb->obj);
 	seq_printf(m, "\n");
 
 	list_for_each_entry(fb, &dev->mode_config.fb_list, base.head) {
@@ -898,7 +1147,7 @@ static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
 			   fb->base.height,
 			   fb->base.depth,
 			   fb->base.bits_per_pixel);
-		describe_obj(m, to_intel_bo(fb->obj));
+		describe_obj(m, fb->obj);
 		seq_printf(m, "\n");
 	}
 
@@ -943,7 +1192,6 @@ i915_wedged_write(struct file *filp,
 			  loff_t *ppos)
 {
 	struct drm_device *dev = filp->private_data;
-	drm_i915_private_t *dev_priv = dev->dev_private;
 	char buf[20];
 	int val = 1;
 
@@ -959,12 +1207,7 @@ i915_wedged_write(struct file *filp,
 	}
 
 	DRM_INFO("Manually setting wedged to %d\n", val);
-
-	atomic_set(&dev_priv->mm.wedged, val);
-	if (val) {
-		wake_up_all(&dev_priv->irq_queue);
-		queue_work(dev_priv->wq, &dev_priv->error_work);
-	}
+	i915_handle_error(dev, val);
 
 	return cnt;
 }
@@ -1018,6 +1261,7 @@ static int i915_wedged_create(struct dentry *root, struct drm_minor *minor)
 static struct drm_info_list i915_debugfs_list[] = {
 	{"i915_capabilities", i915_capabilities, 0, 0},
 	{"i915_gem_objects", i915_gem_object_info, 0},
+	{"i915_gem_gtt", i915_gem_gtt_info, 0},
 	{"i915_gem_active", i915_gem_object_list_info, 0, (void *) ACTIVE_LIST},
 	{"i915_gem_flushing", i915_gem_object_list_info, 0, (void *) FLUSHING_LIST},
 	{"i915_gem_inactive", i915_gem_object_list_info, 0, (void *) INACTIVE_LIST},
@@ -1028,9 +1272,15 @@ static struct drm_info_list i915_debugfs_list[] = {
 	{"i915_gem_seqno", i915_gem_seqno_info, 0},
 	{"i915_gem_fence_regs", i915_gem_fence_regs_info, 0},
 	{"i915_gem_interrupt", i915_interrupt_info, 0},
-	{"i915_gem_hws", i915_hws_info, 0},
-	{"i915_ringbuffer_data", i915_ringbuffer_data, 0},
-	{"i915_ringbuffer_info", i915_ringbuffer_info, 0},
+	{"i915_gem_hws", i915_hws_info, 0, (void *)RCS},
+	{"i915_gem_hws_blt", i915_hws_info, 0, (void *)BCS},
+	{"i915_gem_hws_bsd", i915_hws_info, 0, (void *)VCS},
+	{"i915_ringbuffer_data", i915_ringbuffer_data, 0, (void *)RCS},
+	{"i915_ringbuffer_info", i915_ringbuffer_info, 0, (void *)RCS},
+	{"i915_bsd_ringbuffer_data", i915_ringbuffer_data, 0, (void *)VCS},
+	{"i915_bsd_ringbuffer_info", i915_ringbuffer_info, 0, (void *)VCS},
+	{"i915_blt_ringbuffer_data", i915_ringbuffer_data, 0, (void *)BCS},
+	{"i915_blt_ringbuffer_info", i915_ringbuffer_info, 0, (void *)BCS},
 	{"i915_batchbuffers", i915_batchbuffer_info, 0},
 	{"i915_error_state", i915_error_state, 0},
 	{"i915_rstdby_delays", i915_rstdby_delays, 0},