about summary refs log tree commit diff stats
path: root/drivers
diff options
context:
space:
mode:
authorChris Wilson <chris@chris-wilson.co.uk>2010-08-07 06:01:26 -0400
committerEric Anholt <eric@anholt.net>2010-08-09 14:24:33 -0400
commite56660ddfb48ccc6777f31cb235db218e0cf5b83 (patch)
treec04a7c145327a557b7b8b8b9762b1ed657a4e17e /drivers
parent7d1c4804ae98cdee572d7d10d8a5deaa2e686285 (diff)
drm/i915: Record error batch buffers using iomem
Directly read the GTT mapping for the contents of the batch buffers rather than relying on possibly stale CPU caches. Also for completeness scan the flushing/inactive lists for the current buffers - we are collecting error state after all. Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk> Signed-off-by: Eric Anholt <eric@anholt.net>
Diffstat (limited to 'drivers')
-rw-r--r--drivers/gpu/drm/i915/i915_irq.c64
1 file changed, 57 insertions, 7 deletions
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 854ab1e92fd9..5161cea7a4ef 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -425,9 +425,11 @@ static struct drm_i915_error_object *
425i915_error_object_create(struct drm_device *dev, 425i915_error_object_create(struct drm_device *dev,
426 struct drm_gem_object *src) 426 struct drm_gem_object *src)
427{ 427{
428 drm_i915_private_t *dev_priv = dev->dev_private;
428 struct drm_i915_error_object *dst; 429 struct drm_i915_error_object *dst;
429 struct drm_i915_gem_object *src_priv; 430 struct drm_i915_gem_object *src_priv;
430 int page, page_count; 431 int page, page_count;
432 u32 reloc_offset;
431 433
432 if (src == NULL) 434 if (src == NULL)
433 return NULL; 435 return NULL;
@@ -442,18 +444,27 @@ i915_error_object_create(struct drm_device *dev,
442 if (dst == NULL) 444 if (dst == NULL)
443 return NULL; 445 return NULL;
444 446
447 reloc_offset = src_priv->gtt_offset;
445 for (page = 0; page < page_count; page++) { 448 for (page = 0; page < page_count; page++) {
446 void *s, *d = kmalloc(PAGE_SIZE, GFP_ATOMIC);
447 unsigned long flags; 449 unsigned long flags;
450 void __iomem *s;
451 void *d;
448 452
453 d = kmalloc(PAGE_SIZE, GFP_ATOMIC);
449 if (d == NULL) 454 if (d == NULL)
450 goto unwind; 455 goto unwind;
456
451 local_irq_save(flags); 457 local_irq_save(flags);
452 s = kmap_atomic(src_priv->pages[page], KM_IRQ0); 458 s = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping,
453 memcpy(d, s, PAGE_SIZE); 459 reloc_offset,
454 kunmap_atomic(s, KM_IRQ0); 460 KM_IRQ0);
461 memcpy_fromio(d, s, PAGE_SIZE);
462 io_mapping_unmap_atomic(s, KM_IRQ0);
455 local_irq_restore(flags); 463 local_irq_restore(flags);
464
456 dst->pages[page] = d; 465 dst->pages[page] = d;
466
467 reloc_offset += PAGE_SIZE;
457 } 468 }
458 dst->page_count = page_count; 469 dst->page_count = page_count;
459 dst->gtt_offset = src_priv->gtt_offset; 470 dst->gtt_offset = src_priv->gtt_offset;
@@ -613,18 +624,57 @@ static void i915_capture_error_state(struct drm_device *dev)
613 624
614 if (batchbuffer[1] == NULL && 625 if (batchbuffer[1] == NULL &&
615 error->acthd >= obj_priv->gtt_offset && 626 error->acthd >= obj_priv->gtt_offset &&
616 error->acthd < obj_priv->gtt_offset + obj->size && 627 error->acthd < obj_priv->gtt_offset + obj->size)
617 batchbuffer[0] != obj)
618 batchbuffer[1] = obj; 628 batchbuffer[1] = obj;
619 629
620 count++; 630 count++;
621 } 631 }
632 /* Scan the other lists for completeness for those bizarre errors. */
633 if (batchbuffer[0] == NULL || batchbuffer[1] == NULL) {
634 list_for_each_entry(obj_priv, &dev_priv->mm.flushing_list, list) {
635 struct drm_gem_object *obj = &obj_priv->base;
636
637 if (batchbuffer[0] == NULL &&
638 bbaddr >= obj_priv->gtt_offset &&
639 bbaddr < obj_priv->gtt_offset + obj->size)
640 batchbuffer[0] = obj;
641
642 if (batchbuffer[1] == NULL &&
643 error->acthd >= obj_priv->gtt_offset &&
644 error->acthd < obj_priv->gtt_offset + obj->size)
645 batchbuffer[1] = obj;
646
647 if (batchbuffer[0] && batchbuffer[1])
648 break;
649 }
650 }
651 if (batchbuffer[0] == NULL || batchbuffer[1] == NULL) {
652 list_for_each_entry(obj_priv, &dev_priv->mm.inactive_list, list) {
653 struct drm_gem_object *obj = &obj_priv->base;
654
655 if (batchbuffer[0] == NULL &&
656 bbaddr >= obj_priv->gtt_offset &&
657 bbaddr < obj_priv->gtt_offset + obj->size)
658 batchbuffer[0] = obj;
659
660 if (batchbuffer[1] == NULL &&
661 error->acthd >= obj_priv->gtt_offset &&
662 error->acthd < obj_priv->gtt_offset + obj->size)
663 batchbuffer[1] = obj;
664
665 if (batchbuffer[0] && batchbuffer[1])
666 break;
667 }
668 }
622 669
623 /* We need to copy these to an anonymous buffer as the simplest 670 /* We need to copy these to an anonymous buffer as the simplest
624 * method to avoid being overwritten by userpace. 671 * method to avoid being overwritten by userpace.
625 */ 672 */
626 error->batchbuffer[0] = i915_error_object_create(dev, batchbuffer[0]); 673 error->batchbuffer[0] = i915_error_object_create(dev, batchbuffer[0]);
627 error->batchbuffer[1] = i915_error_object_create(dev, batchbuffer[1]); 674 if (batchbuffer[1] != batchbuffer[0])
675 error->batchbuffer[1] = i915_error_object_create(dev, batchbuffer[1]);
676 else
677 error->batchbuffer[1] = NULL;
628 678
629 /* Record the ringbuffer */ 679 /* Record the ringbuffer */
630 error->ringbuffer = i915_error_object_create(dev, 680 error->ringbuffer = i915_error_object_create(dev,