author     Linus Torvalds <torvalds@linux-foundation.org>  2010-10-26 21:57:59 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2010-10-26 21:57:59 -0400
commit     c48c43e422c1404fd72c57d1d21a6f6d01e18900
tree       48e5d3828b4f5479361986535f71a1ae44e4f3c1 /drivers/gpu/drm/i915/i915_debugfs.c
parent     520045db940a381d2bee1c1b2179f7921b40fb10
parent     135cba0dc399fdd47bd3ae305c1db75fcd77243f
Merge branch 'drm-core-next' of git://git.kernel.org/pub/scm/linux/kernel/git/airlied/drm-2.6
* 'drm-core-next' of git://git.kernel.org/pub/scm/linux/kernel/git/airlied/drm-2.6: (476 commits)
  vmwgfx: Implement a proper GMR eviction mechanism
  drm/radeon/kms: fix r6xx/7xx 1D tiling CS checker v2
  drm/radeon/kms: properly compute group_size on 6xx/7xx
  drm/radeon/kms: fix 2D tile height alignment in the r600 CS checker
  drm/radeon/kms/evergreen: set the clear state to the blit state
  drm/radeon/kms: don't poll dac load detect.
  gpu: Add Intel GMA500(Poulsbo) Stub Driver
  drm/radeon/kms: MC vram map needs to be >= pci aperture size
  drm/radeon/kms: implement display watermark support for evergreen
  drm/radeon/kms/evergreen: add some additional safe regs v2
  drm/radeon/r600: fix tiling issues in CS checker.
  drm/i915: Move gpu_write_list to per-ring
  drm/i915: Invalidate the to-ring, flush the old-ring when updating domains
  drm/i915/ringbuffer: Write the value passed in to the tail register
  agp/intel: Restore valid PTE bit for Sandybridge after bdd3072
  drm/i915: Fix flushing regression from 9af90d19f
  drm/i915/sdvo: Remove unused encoding member
  i915: enable AVI infoframe for intel_hdmi.c [v4]
  drm/i915: Fix current fb blocking for page flip
  drm/i915: IS_IRONLAKE is synonymous with gen == 5
  ...

Fix up conflicts in
 - drivers/gpu/drm/i915/{i915_gem.c, i915/intel_overlay.c}: due to the
   new simplified stack-based kmap_atomic() interface
 - drivers/gpu/drm/vmwgfx/vmwgfx_drv.c: added .llseek entry due to BKL
   removal cleanups.
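Note on the i915_gem.c / intel_overlay.c conflicts above: the "simplified stack-based kmap_atomic() interface" refers to the 2.6.37-era API change that dropped the explicit KM_* slot argument, so atomic kmaps now nest in LIFO order. A minimal sketch of the calling-convention change follows; the copy_page_atomic() helper is hypothetical, for illustration only, and is not code from this merge:

#include <linux/highmem.h>
#include <linux/string.h>

/* Hypothetical example helper: copy one page through an atomic kmap.
 *
 * Old interface (what the merged branch was still written against):
 *	src = kmap_atomic(page, KM_USER0);
 *	...
 *	kunmap_atomic(src, KM_USER0);
 *
 * New stack-based interface (what mainline now provides):
 */
static void copy_page_atomic(struct page *page, void *dst)
{
	void *src = kmap_atomic(page);	/* slot chosen implicitly */

	memcpy(dst, src, PAGE_SIZE);
	kunmap_atomic(src);		/* mappings must be released in LIFO order */
}

A related conversion is visible in this file's own diff below, where the kmap_atomic(..., KM_USER0)/kunmap_atomic(..., KM_USER0) pair in i915_dump_pages() is replaced by io_mapping_map_wc()/io_mapping_unmap().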
Diffstat (limited to 'drivers/gpu/drm/i915/i915_debugfs.c')
-rw-r--r--  drivers/gpu/drm/i915/i915_debugfs.c  336
1 file changed, 267 insertions(+), 69 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 048149748fdc..1f4f3ceb63c7 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -40,9 +40,51 @@
 
 #if defined(CONFIG_DEBUG_FS)
 
-#define ACTIVE_LIST	1
-#define FLUSHING_LIST	2
-#define INACTIVE_LIST	3
+enum {
+	ACTIVE_LIST,
+	FLUSHING_LIST,
+	INACTIVE_LIST,
+	PINNED_LIST,
+	DEFERRED_FREE_LIST,
+};
+
+static const char *yesno(int v)
+{
+	return v ? "yes" : "no";
+}
+
+static int i915_capabilities(struct seq_file *m, void *data)
+{
+	struct drm_info_node *node = (struct drm_info_node *) m->private;
+	struct drm_device *dev = node->minor->dev;
+	const struct intel_device_info *info = INTEL_INFO(dev);
+
+	seq_printf(m, "gen: %d\n", info->gen);
+#define B(x) seq_printf(m, #x ": %s\n", yesno(info->x))
+	B(is_mobile);
+	B(is_i85x);
+	B(is_i915g);
+	B(is_i945gm);
+	B(is_g33);
+	B(need_gfx_hws);
+	B(is_g4x);
+	B(is_pineview);
+	B(is_broadwater);
+	B(is_crestline);
+	B(has_fbc);
+	B(has_rc6);
+	B(has_pipe_cxsr);
+	B(has_hotplug);
+	B(cursor_needs_physical);
+	B(has_overlay);
+	B(overlay_needs_physical);
+	B(supports_tv);
+	B(has_bsd_ring);
+	B(has_blt_ring);
+#undef B
+
+	return 0;
+}
 
 static const char *get_pin_flag(struct drm_i915_gem_object *obj_priv)
 {
@@ -64,6 +106,29 @@ static const char *get_tiling_flag(struct drm_i915_gem_object *obj_priv)
 	}
 }
 
+static void
+describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
+{
+	seq_printf(m, "%p: %s%s %8zd %08x %08x %d%s%s",
+		   &obj->base,
+		   get_pin_flag(obj),
+		   get_tiling_flag(obj),
+		   obj->base.size,
+		   obj->base.read_domains,
+		   obj->base.write_domain,
+		   obj->last_rendering_seqno,
+		   obj->dirty ? " dirty" : "",
+		   obj->madv == I915_MADV_DONTNEED ? " purgeable" : "");
+	if (obj->base.name)
+		seq_printf(m, " (name: %d)", obj->base.name);
+	if (obj->fence_reg != I915_FENCE_REG_NONE)
+		seq_printf(m, " (fence: %d)", obj->fence_reg);
+	if (obj->gtt_space != NULL)
+		seq_printf(m, " (gtt_offset: %08x)", obj->gtt_offset);
+	if (obj->ring != NULL)
+		seq_printf(m, " (%s)", obj->ring->name);
+}
+
 static int i915_gem_object_list_info(struct seq_file *m, void *data)
 {
 	struct drm_info_node *node = (struct drm_info_node *) m->private;
@@ -72,56 +137,80 @@ static int i915_gem_object_list_info(struct seq_file *m, void *data)
 	struct drm_device *dev = node->minor->dev;
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	struct drm_i915_gem_object *obj_priv;
-	spinlock_t *lock = NULL;
+	size_t total_obj_size, total_gtt_size;
+	int count, ret;
+
+	ret = mutex_lock_interruptible(&dev->struct_mutex);
+	if (ret)
+		return ret;
 
 	switch (list) {
 	case ACTIVE_LIST:
 		seq_printf(m, "Active:\n");
-		lock = &dev_priv->mm.active_list_lock;
-		head = &dev_priv->render_ring.active_list;
+		head = &dev_priv->mm.active_list;
 		break;
 	case INACTIVE_LIST:
 		seq_printf(m, "Inactive:\n");
 		head = &dev_priv->mm.inactive_list;
 		break;
+	case PINNED_LIST:
+		seq_printf(m, "Pinned:\n");
+		head = &dev_priv->mm.pinned_list;
+		break;
 	case FLUSHING_LIST:
 		seq_printf(m, "Flushing:\n");
 		head = &dev_priv->mm.flushing_list;
 		break;
+	case DEFERRED_FREE_LIST:
+		seq_printf(m, "Deferred free:\n");
+		head = &dev_priv->mm.deferred_free_list;
+		break;
 	default:
-		DRM_INFO("Ooops, unexpected list\n");
-		return 0;
+		mutex_unlock(&dev->struct_mutex);
+		return -EINVAL;
 	}
 
-	if (lock)
-		spin_lock(lock);
-	list_for_each_entry(obj_priv, head, list)
-	{
-		seq_printf(m, " %p: %s %8zd %08x %08x %d%s%s",
-			   &obj_priv->base,
-			   get_pin_flag(obj_priv),
-			   obj_priv->base.size,
-			   obj_priv->base.read_domains,
-			   obj_priv->base.write_domain,
-			   obj_priv->last_rendering_seqno,
-			   obj_priv->dirty ? " dirty" : "",
-			   obj_priv->madv == I915_MADV_DONTNEED ? " purgeable" : "");
-
-		if (obj_priv->base.name)
-			seq_printf(m, " (name: %d)", obj_priv->base.name);
-		if (obj_priv->fence_reg != I915_FENCE_REG_NONE)
-			seq_printf(m, " (fence: %d)", obj_priv->fence_reg);
-		if (obj_priv->gtt_space != NULL)
-			seq_printf(m, " (gtt_offset: %08x)", obj_priv->gtt_offset);
-
+	total_obj_size = total_gtt_size = count = 0;
+	list_for_each_entry(obj_priv, head, mm_list) {
+		seq_printf(m, " ");
+		describe_obj(m, obj_priv);
 		seq_printf(m, "\n");
+		total_obj_size += obj_priv->base.size;
+		total_gtt_size += obj_priv->gtt_space->size;
+		count++;
 	}
+	mutex_unlock(&dev->struct_mutex);
 
-	if (lock)
-		spin_unlock(lock);
+	seq_printf(m, "Total %d objects, %zu bytes, %zu GTT size\n",
+		   count, total_obj_size, total_gtt_size);
 	return 0;
 }
 
+static int i915_gem_object_info(struct seq_file *m, void* data)
+{
+	struct drm_info_node *node = (struct drm_info_node *) m->private;
+	struct drm_device *dev = node->minor->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	int ret;
+
+	ret = mutex_lock_interruptible(&dev->struct_mutex);
+	if (ret)
+		return ret;
+
+	seq_printf(m, "%u objects\n", dev_priv->mm.object_count);
+	seq_printf(m, "%zu object bytes\n", dev_priv->mm.object_memory);
+	seq_printf(m, "%u pinned\n", dev_priv->mm.pin_count);
+	seq_printf(m, "%zu pin bytes\n", dev_priv->mm.pin_memory);
+	seq_printf(m, "%u objects in gtt\n", dev_priv->mm.gtt_count);
+	seq_printf(m, "%zu gtt bytes\n", dev_priv->mm.gtt_memory);
+	seq_printf(m, "%zu gtt total\n", dev_priv->mm.gtt_total);
+
+	mutex_unlock(&dev->struct_mutex);
+
+	return 0;
+}
+
+
 static int i915_gem_pageflip_info(struct seq_file *m, void *data)
 {
 	struct drm_info_node *node = (struct drm_info_node *) m->private;
@@ -176,6 +265,11 @@ static int i915_gem_request_info(struct seq_file *m, void *data)
 	struct drm_device *dev = node->minor->dev;
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	struct drm_i915_gem_request *gem_request;
+	int ret;
+
+	ret = mutex_lock_interruptible(&dev->struct_mutex);
+	if (ret)
+		return ret;
 
 	seq_printf(m, "Request:\n");
 	list_for_each_entry(gem_request, &dev_priv->render_ring.request_list,
@@ -184,6 +278,8 @@ static int i915_gem_request_info(struct seq_file *m, void *data)
 			   gem_request->seqno,
 			   (int) (jiffies - gem_request->emitted_jiffies));
 	}
+	mutex_unlock(&dev->struct_mutex);
+
 	return 0;
 }
 
@@ -192,16 +288,24 @@ static int i915_gem_seqno_info(struct seq_file *m, void *data)
 	struct drm_info_node *node = (struct drm_info_node *) m->private;
 	struct drm_device *dev = node->minor->dev;
 	drm_i915_private_t *dev_priv = dev->dev_private;
+	int ret;
+
+	ret = mutex_lock_interruptible(&dev->struct_mutex);
+	if (ret)
+		return ret;
 
 	if (dev_priv->render_ring.status_page.page_addr != NULL) {
 		seq_printf(m, "Current sequence: %d\n",
-			   i915_get_gem_seqno(dev, &dev_priv->render_ring));
+			   dev_priv->render_ring.get_seqno(dev, &dev_priv->render_ring));
 	} else {
 		seq_printf(m, "Current sequence: hws uninitialized\n");
 	}
 	seq_printf(m, "Waiter sequence: %d\n",
 		   dev_priv->mm.waiting_gem_seqno);
 	seq_printf(m, "IRQ sequence: %d\n", dev_priv->mm.irq_gem_seqno);
+
+	mutex_unlock(&dev->struct_mutex);
+
 	return 0;
 }
 
@@ -211,6 +315,11 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
 	struct drm_info_node *node = (struct drm_info_node *) m->private;
 	struct drm_device *dev = node->minor->dev;
 	drm_i915_private_t *dev_priv = dev->dev_private;
+	int ret;
+
+	ret = mutex_lock_interruptible(&dev->struct_mutex);
+	if (ret)
+		return ret;
 
 	if (!HAS_PCH_SPLIT(dev)) {
 		seq_printf(m, "Interrupt enable: %08x\n",
@@ -247,7 +356,7 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
 		   atomic_read(&dev_priv->irq_received));
 	if (dev_priv->render_ring.status_page.page_addr != NULL) {
 		seq_printf(m, "Current sequence: %d\n",
-			   i915_get_gem_seqno(dev, &dev_priv->render_ring));
+			   dev_priv->render_ring.get_seqno(dev, &dev_priv->render_ring));
 	} else {
 		seq_printf(m, "Current sequence: hws uninitialized\n");
 	}
@@ -255,6 +364,8 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
 		   dev_priv->mm.waiting_gem_seqno);
 	seq_printf(m, "IRQ sequence: %d\n",
 		   dev_priv->mm.irq_gem_seqno);
+	mutex_unlock(&dev->struct_mutex);
+
 	return 0;
 }
 
@@ -263,7 +374,11 @@ static int i915_gem_fence_regs_info(struct seq_file *m, void *data)
 	struct drm_info_node *node = (struct drm_info_node *) m->private;
 	struct drm_device *dev = node->minor->dev;
 	drm_i915_private_t *dev_priv = dev->dev_private;
-	int i;
+	int i, ret;
+
+	ret = mutex_lock_interruptible(&dev->struct_mutex);
+	if (ret)
+		return ret;
 
 	seq_printf(m, "Reserved fences = %d\n", dev_priv->fence_reg_start);
 	seq_printf(m, "Total fences = %d\n", dev_priv->num_fence_regs);
@@ -289,6 +404,7 @@ static int i915_gem_fence_regs_info(struct seq_file *m, void *data)
 			seq_printf(m, "\n");
 		}
 	}
+	mutex_unlock(&dev->struct_mutex);
 
 	return 0;
 }
@@ -313,16 +429,19 @@ static int i915_hws_info(struct seq_file *m, void *data)
 	return 0;
 }
 
-static void i915_dump_pages(struct seq_file *m, struct page **pages, int page_count)
+static void i915_dump_object(struct seq_file *m,
+			     struct io_mapping *mapping,
+			     struct drm_i915_gem_object *obj_priv)
 {
-	int page, i;
-	uint32_t *mem;
+	int page, page_count, i;
 
+	page_count = obj_priv->base.size / PAGE_SIZE;
 	for (page = 0; page < page_count; page++) {
-		mem = kmap_atomic(pages[page], KM_USER0);
+		u32 *mem = io_mapping_map_wc(mapping,
+					     obj_priv->gtt_offset + page * PAGE_SIZE);
 		for (i = 0; i < PAGE_SIZE; i += 4)
 			seq_printf(m, "%08x : %08x\n", i, mem[i / 4]);
-		kunmap_atomic(mem, KM_USER0);
+		io_mapping_unmap(mem);
 	}
 }
 
@@ -335,27 +454,20 @@ static int i915_batchbuffer_info(struct seq_file *m, void *data)
 	struct drm_i915_gem_object *obj_priv;
 	int ret;
 
-	spin_lock(&dev_priv->mm.active_list_lock);
+	ret = mutex_lock_interruptible(&dev->struct_mutex);
+	if (ret)
+		return ret;
 
-	list_for_each_entry(obj_priv, &dev_priv->render_ring.active_list,
-			    list) {
+	list_for_each_entry(obj_priv, &dev_priv->mm.active_list, mm_list) {
 		obj = &obj_priv->base;
 		if (obj->read_domains & I915_GEM_DOMAIN_COMMAND) {
-			ret = i915_gem_object_get_pages(obj, 0);
-			if (ret) {
-				DRM_ERROR("Failed to get pages: %d\n", ret);
-				spin_unlock(&dev_priv->mm.active_list_lock);
-				return ret;
-			}
-
-			seq_printf(m, "--- gtt_offset = 0x%08x\n", obj_priv->gtt_offset);
-			i915_dump_pages(m, obj_priv->pages, obj->size / PAGE_SIZE);
-
-			i915_gem_object_put_pages(obj);
+			seq_printf(m, "--- gtt_offset = 0x%08x\n",
+				   obj_priv->gtt_offset);
+			i915_dump_object(m, dev_priv->mm.gtt_mapping, obj_priv);
 		}
 	}
 
-	spin_unlock(&dev_priv->mm.active_list_lock);
+	mutex_unlock(&dev->struct_mutex);
 
 	return 0;
 }
@@ -365,20 +477,24 @@ static int i915_ringbuffer_data(struct seq_file *m, void *data)
 	struct drm_info_node *node = (struct drm_info_node *) m->private;
 	struct drm_device *dev = node->minor->dev;
 	drm_i915_private_t *dev_priv = dev->dev_private;
-	u8 *virt;
-	uint32_t *ptr, off;
+	int ret;
+
+	ret = mutex_lock_interruptible(&dev->struct_mutex);
+	if (ret)
+		return ret;
 
 	if (!dev_priv->render_ring.gem_object) {
 		seq_printf(m, "No ringbuffer setup\n");
-		return 0;
-	}
-
-	virt = dev_priv->render_ring.virtual_start;
+	} else {
+		u8 *virt = dev_priv->render_ring.virtual_start;
+		uint32_t off;
 
-	for (off = 0; off < dev_priv->render_ring.size; off += 4) {
-		ptr = (uint32_t *)(virt + off);
-		seq_printf(m, "%08x : %08x\n", off, *ptr);
+		for (off = 0; off < dev_priv->render_ring.size; off += 4) {
+			uint32_t *ptr = (uint32_t *)(virt + off);
+			seq_printf(m, "%08x : %08x\n", off, *ptr);
+		}
 	}
+	mutex_unlock(&dev->struct_mutex);
 
 	return 0;
 }
@@ -396,7 +512,7 @@ static int i915_ringbuffer_info(struct seq_file *m, void *data)
 	seq_printf(m, "RingHead : %08x\n", head);
 	seq_printf(m, "RingTail : %08x\n", tail);
 	seq_printf(m, "RingSize : %08lx\n", dev_priv->render_ring.size);
-	seq_printf(m, "Acthd : %08x\n", I915_READ(IS_I965G(dev) ? ACTHD_I965 : ACTHD));
+	seq_printf(m, "Acthd : %08x\n", I915_READ(INTEL_INFO(dev)->gen >= 4 ? ACTHD_I965 : ACTHD));
 
 	return 0;
 }
@@ -458,7 +574,7 @@ static int i915_error_state(struct seq_file *m, void *unused)
 	seq_printf(m, " IPEHR: 0x%08x\n", error->ipehr);
 	seq_printf(m, " INSTDONE: 0x%08x\n", error->instdone);
 	seq_printf(m, " ACTHD: 0x%08x\n", error->acthd);
-	if (IS_I965G(dev)) {
+	if (INTEL_INFO(dev)->gen >= 4) {
 		seq_printf(m, " INSTPS: 0x%08x\n", error->instps);
 		seq_printf(m, " INSTDONE1: 0x%08x\n", error->instdone1);
 	}
@@ -642,6 +758,9 @@ static int i915_fbc_status(struct seq_file *m, void *unused)
 	} else {
 		seq_printf(m, "FBC disabled: ");
 		switch (dev_priv->no_fbc_reason) {
+		case FBC_NO_OUTPUT:
+			seq_printf(m, "no outputs");
+			break;
 		case FBC_STOLEN_TOO_SMALL:
 			seq_printf(m, "not enough stolen memory");
 			break;
@@ -675,15 +794,17 @@ static int i915_sr_status(struct seq_file *m, void *unused)
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	bool sr_enabled = false;
 
-	if (IS_I965GM(dev) || IS_I945G(dev) || IS_I945GM(dev))
+	if (IS_GEN5(dev))
+		sr_enabled = I915_READ(WM1_LP_ILK) & WM1_LP_SR_EN;
+	else if (IS_CRESTLINE(dev) || IS_I945G(dev) || IS_I945GM(dev))
 		sr_enabled = I915_READ(FW_BLC_SELF) & FW_BLC_SELF_EN;
 	else if (IS_I915GM(dev))
 		sr_enabled = I915_READ(INSTPM) & INSTPM_SELF_EN;
 	else if (IS_PINEVIEW(dev))
 		sr_enabled = I915_READ(DSPFW3) & PINEVIEW_SELF_REFRESH_EN;
 
-	seq_printf(m, "self-refresh: %s\n", sr_enabled ? "enabled" :
-		   "disabled");
+	seq_printf(m, "self-refresh: %s\n",
+		   sr_enabled ? "enabled" : "disabled");
 
 	return 0;
 }
@@ -694,10 +815,16 @@ static int i915_emon_status(struct seq_file *m, void *unused)
 	struct drm_device *dev = node->minor->dev;
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	unsigned long temp, chipset, gfx;
+	int ret;
+
+	ret = mutex_lock_interruptible(&dev->struct_mutex);
+	if (ret)
+		return ret;
 
 	temp = i915_mch_val(dev_priv);
 	chipset = i915_chipset_val(dev_priv);
 	gfx = i915_gfx_val(dev_priv);
+	mutex_unlock(&dev->struct_mutex);
 
 	seq_printf(m, "GMCH temp: %ld\n", temp);
 	seq_printf(m, "Chipset power: %ld\n", chipset);
@@ -718,6 +845,68 @@ static int i915_gfxec(struct seq_file *m, void *unused)
 	return 0;
 }
 
+static int i915_opregion(struct seq_file *m, void *unused)
+{
+	struct drm_info_node *node = (struct drm_info_node *) m->private;
+	struct drm_device *dev = node->minor->dev;
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	struct intel_opregion *opregion = &dev_priv->opregion;
+	int ret;
+
+	ret = mutex_lock_interruptible(&dev->struct_mutex);
+	if (ret)
+		return ret;
+
+	if (opregion->header)
+		seq_write(m, opregion->header, OPREGION_SIZE);
+
+	mutex_unlock(&dev->struct_mutex);
+
+	return 0;
+}
+
+static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
+{
+	struct drm_info_node *node = (struct drm_info_node *) m->private;
+	struct drm_device *dev = node->minor->dev;
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	struct intel_fbdev *ifbdev;
+	struct intel_framebuffer *fb;
+	int ret;
+
+	ret = mutex_lock_interruptible(&dev->mode_config.mutex);
+	if (ret)
+		return ret;
+
+	ifbdev = dev_priv->fbdev;
+	fb = to_intel_framebuffer(ifbdev->helper.fb);
+
+	seq_printf(m, "fbcon size: %d x %d, depth %d, %d bpp, obj ",
+		   fb->base.width,
+		   fb->base.height,
+		   fb->base.depth,
+		   fb->base.bits_per_pixel);
+	describe_obj(m, to_intel_bo(fb->obj));
+	seq_printf(m, "\n");
+
+	list_for_each_entry(fb, &dev->mode_config.fb_list, base.head) {
+		if (&fb->base == ifbdev->helper.fb)
+			continue;
+
+		seq_printf(m, "user size: %d x %d, depth %d, %d bpp, obj ",
+			   fb->base.width,
+			   fb->base.height,
+			   fb->base.depth,
+			   fb->base.bits_per_pixel);
+		describe_obj(m, to_intel_bo(fb->obj));
+		seq_printf(m, "\n");
+	}
+
+	mutex_unlock(&dev->mode_config.mutex);
+
+	return 0;
+}
+
 static int
 i915_wedged_open(struct inode *inode,
 		 struct file *filp)
@@ -741,6 +930,9 @@ i915_wedged_read(struct file *filp,
741 "wedged : %d\n", 930 "wedged : %d\n",
742 atomic_read(&dev_priv->mm.wedged)); 931 atomic_read(&dev_priv->mm.wedged));
743 932
933 if (len > sizeof (buf))
934 len = sizeof (buf);
935
744 return simple_read_from_buffer(ubuf, max, ppos, buf, len); 936 return simple_read_from_buffer(ubuf, max, ppos, buf, len);
745} 937}
746 938
@@ -770,7 +962,7 @@ i915_wedged_write(struct file *filp,
 
 	atomic_set(&dev_priv->mm.wedged, val);
 	if (val) {
-		DRM_WAKEUP(&dev_priv->irq_queue);
+		wake_up_all(&dev_priv->irq_queue);
 		queue_work(dev_priv->wq, &dev_priv->error_work);
 	}
 
@@ -824,9 +1016,13 @@ static int i915_wedged_create(struct dentry *root, struct drm_minor *minor)
 }
 
 static struct drm_info_list i915_debugfs_list[] = {
+	{"i915_capabilities", i915_capabilities, 0, 0},
+	{"i915_gem_objects", i915_gem_object_info, 0},
 	{"i915_gem_active", i915_gem_object_list_info, 0, (void *) ACTIVE_LIST},
 	{"i915_gem_flushing", i915_gem_object_list_info, 0, (void *) FLUSHING_LIST},
 	{"i915_gem_inactive", i915_gem_object_list_info, 0, (void *) INACTIVE_LIST},
+	{"i915_gem_pinned", i915_gem_object_list_info, 0, (void *) PINNED_LIST},
+	{"i915_gem_deferred_free", i915_gem_object_list_info, 0, (void *) DEFERRED_FREE_LIST},
 	{"i915_gem_pageflip", i915_gem_pageflip_info, 0},
 	{"i915_gem_request", i915_gem_request_info, 0},
 	{"i915_gem_seqno", i915_gem_seqno_info, 0},
@@ -846,6 +1042,8 @@ static struct drm_info_list i915_debugfs_list[] = {
846 {"i915_gfxec", i915_gfxec, 0}, 1042 {"i915_gfxec", i915_gfxec, 0},
847 {"i915_fbc_status", i915_fbc_status, 0}, 1043 {"i915_fbc_status", i915_fbc_status, 0},
848 {"i915_sr_status", i915_sr_status, 0}, 1044 {"i915_sr_status", i915_sr_status, 0},
1045 {"i915_opregion", i915_opregion, 0},
1046 {"i915_gem_framebuffer", i915_gem_framebuffer_info, 0},
849}; 1047};
850#define I915_DEBUGFS_ENTRIES ARRAY_SIZE(i915_debugfs_list) 1048#define I915_DEBUGFS_ENTRIES ARRAY_SIZE(i915_debugfs_list)
851 1049