diff options
| author | Chris Wilson <chris@chris-wilson.co.uk> | 2011-01-07 16:06:07 -0500 |
|---|---|---|
| committer | Chris Wilson <chris@chris-wilson.co.uk> | 2011-01-11 15:44:55 -0500 |
| commit | bcfb2e285827bf0cfea8bbfad18a4fca57fbabae (patch) | |
| tree | a73e795899cdef6ec8bdc0a41ee096b66928fba5 | |
| parent | db66e37d239b45f36a3f6495cf4ec49391b2c089 (diff) | |
drm/i915: Record the error batchbuffer on each ring
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
| -rw-r--r-- | drivers/gpu/drm/i915/i915_debugfs.c | 4 | ||||
| -rw-r--r-- | drivers/gpu/drm/i915/i915_drv.h | 2 | ||||
| -rw-r--r-- | drivers/gpu/drm/i915/i915_irq.c | 164 |
3 files changed, 50 insertions(+), 120 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index 9c4cdc143be..a7c194a837a 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c | |||
| @@ -750,7 +750,9 @@ static int i915_error_state(struct seq_file *m, void *unused) | |||
| 750 | if (error->batchbuffer[i]) { | 750 | if (error->batchbuffer[i]) { |
| 751 | struct drm_i915_error_object *obj = error->batchbuffer[i]; | 751 | struct drm_i915_error_object *obj = error->batchbuffer[i]; |
| 752 | 752 | ||
| 753 | seq_printf(m, "--- gtt_offset = 0x%08x\n", obj->gtt_offset); | 753 | seq_printf(m, "%s --- gtt_offset = 0x%08x\n", |
| 754 | dev_priv->ring[i].name, | ||
| 755 | obj->gtt_offset); | ||
| 754 | offset = 0; | 756 | offset = 0; |
| 755 | for (page = 0; page < obj->page_count; page++) { | 757 | for (page = 0; page < obj->page_count; page++) { |
| 756 | for (elt = 0; elt < PAGE_SIZE/4; elt++) { | 758 | for (elt = 0; elt < PAGE_SIZE/4; elt++) { |
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 3e78314514a..6c9a042737d 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h | |||
| @@ -172,7 +172,7 @@ struct drm_i915_error_state { | |||
| 172 | int page_count; | 172 | int page_count; |
| 173 | u32 gtt_offset; | 173 | u32 gtt_offset; |
| 174 | u32 *pages[0]; | 174 | u32 *pages[0]; |
| 175 | } *ringbuffer, *batchbuffer[2]; | 175 | } *ringbuffer, *batchbuffer[I915_NUM_RINGS]; |
| 176 | struct drm_i915_error_buffer { | 176 | struct drm_i915_error_buffer { |
| 177 | size_t size; | 177 | size_t size; |
| 178 | u32 name; | 178 | u32 name; |
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index d431fc4fb84..cf61235b858 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c | |||
| @@ -566,10 +566,9 @@ static void i915_error_work_func(struct work_struct *work) | |||
| 566 | 566 | ||
| 567 | #ifdef CONFIG_DEBUG_FS | 567 | #ifdef CONFIG_DEBUG_FS |
| 568 | static struct drm_i915_error_object * | 568 | static struct drm_i915_error_object * |
| 569 | i915_error_object_create(struct drm_device *dev, | 569 | i915_error_object_create(struct drm_i915_private *dev_priv, |
| 570 | struct drm_i915_gem_object *src) | 570 | struct drm_i915_gem_object *src) |
| 571 | { | 571 | { |
| 572 | drm_i915_private_t *dev_priv = dev->dev_private; | ||
| 573 | struct drm_i915_error_object *dst; | 572 | struct drm_i915_error_object *dst; |
| 574 | int page, page_count; | 573 | int page, page_count; |
| 575 | u32 reloc_offset; | 574 | u32 reloc_offset; |
| @@ -642,52 +641,6 @@ i915_error_state_free(struct drm_device *dev, | |||
| 642 | kfree(error); | 641 | kfree(error); |
| 643 | } | 642 | } |
| 644 | 643 | ||
| 645 | static u32 | ||
| 646 | i915_get_bbaddr(struct drm_device *dev, u32 *ring) | ||
| 647 | { | ||
| 648 | u32 cmd; | ||
| 649 | |||
| 650 | if (IS_I830(dev) || IS_845G(dev)) | ||
| 651 | cmd = MI_BATCH_BUFFER; | ||
| 652 | else if (INTEL_INFO(dev)->gen >= 4) | ||
| 653 | cmd = (MI_BATCH_BUFFER_START | (2 << 6) | | ||
| 654 | MI_BATCH_NON_SECURE_I965); | ||
| 655 | else | ||
| 656 | cmd = (MI_BATCH_BUFFER_START | (2 << 6)); | ||
| 657 | |||
| 658 | return ring[0] == cmd ? ring[1] : 0; | ||
| 659 | } | ||
| 660 | |||
| 661 | static u32 | ||
| 662 | i915_ringbuffer_last_batch(struct drm_device *dev, | ||
| 663 | struct intel_ring_buffer *ring) | ||
| 664 | { | ||
| 665 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
| 666 | u32 head, bbaddr; | ||
| 667 | u32 *val; | ||
| 668 | |||
| 669 | /* Locate the current position in the ringbuffer and walk back | ||
| 670 | * to find the most recently dispatched batch buffer. | ||
| 671 | */ | ||
| 672 | head = I915_READ_HEAD(ring) & HEAD_ADDR; | ||
| 673 | |||
| 674 | val = (u32 *)(ring->virtual_start + head); | ||
| 675 | while (--val >= (u32 *)ring->virtual_start) { | ||
| 676 | bbaddr = i915_get_bbaddr(dev, val); | ||
| 677 | if (bbaddr) | ||
| 678 | return bbaddr; | ||
| 679 | } | ||
| 680 | |||
| 681 | val = (u32 *)(ring->virtual_start + ring->size); | ||
| 682 | while (--val >= (u32 *)ring->virtual_start) { | ||
| 683 | bbaddr = i915_get_bbaddr(dev, val); | ||
| 684 | if (bbaddr) | ||
| 685 | return bbaddr; | ||
| 686 | } | ||
| 687 | |||
| 688 | return 0; | ||
| 689 | } | ||
| 690 | |||
| 691 | static u32 capture_bo_list(struct drm_i915_error_buffer *err, | 644 | static u32 capture_bo_list(struct drm_i915_error_buffer *err, |
| 692 | int count, | 645 | int count, |
| 693 | struct list_head *head) | 646 | struct list_head *head) |
| @@ -751,6 +704,36 @@ static void i915_gem_record_fences(struct drm_device *dev, | |||
| 751 | } | 704 | } |
| 752 | } | 705 | } |
| 753 | 706 | ||
| 707 | static struct drm_i915_error_object * | ||
| 708 | i915_error_first_batchbuffer(struct drm_i915_private *dev_priv, | ||
| 709 | struct intel_ring_buffer *ring) | ||
| 710 | { | ||
| 711 | struct drm_i915_gem_object *obj; | ||
| 712 | u32 seqno; | ||
| 713 | |||
| 714 | if (!ring->get_seqno) | ||
| 715 | return NULL; | ||
| 716 | |||
| 717 | seqno = ring->get_seqno(ring); | ||
| 718 | list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) { | ||
| 719 | if (obj->ring != ring) | ||
| 720 | continue; | ||
| 721 | |||
| 722 | if (!i915_seqno_passed(obj->last_rendering_seqno, seqno)) | ||
| 723 | continue; | ||
| 724 | |||
| 725 | if ((obj->base.read_domains & I915_GEM_DOMAIN_COMMAND) == 0) | ||
| 726 | continue; | ||
| 727 | |||
| 728 | /* We need to copy these to an anonymous buffer as the simplest | ||
| 729 | * method to avoid being overwritten by userspace. | ||
| 730 | */ | ||
| 731 | return i915_error_object_create(dev_priv, obj); | ||
| 732 | } | ||
| 733 | |||
| 734 | return NULL; | ||
| 735 | } | ||
| 736 | |||
| 754 | /** | 737 | /** |
| 755 | * i915_capture_error_state - capture an error record for later analysis | 738 | * i915_capture_error_state - capture an error record for later analysis |
| 756 | * @dev: drm device | 739 | * @dev: drm device |
| @@ -765,10 +748,8 @@ static void i915_capture_error_state(struct drm_device *dev) | |||
| 765 | struct drm_i915_private *dev_priv = dev->dev_private; | 748 | struct drm_i915_private *dev_priv = dev->dev_private; |
| 766 | struct drm_i915_gem_object *obj; | 749 | struct drm_i915_gem_object *obj; |
| 767 | struct drm_i915_error_state *error; | 750 | struct drm_i915_error_state *error; |
| 768 | struct drm_i915_gem_object *batchbuffer[2]; | ||
| 769 | unsigned long flags; | 751 | unsigned long flags; |
| 770 | u32 bbaddr; | 752 | int i; |
| 771 | int count; | ||
| 772 | 753 | ||
| 773 | spin_lock_irqsave(&dev_priv->error_lock, flags); | 754 | spin_lock_irqsave(&dev_priv->error_lock, flags); |
| 774 | error = dev_priv->first_error; | 755 | error = dev_priv->first_error; |
| @@ -827,83 +808,30 @@ static void i915_capture_error_state(struct drm_device *dev) | |||
| 827 | } | 808 | } |
| 828 | i915_gem_record_fences(dev, error); | 809 | i915_gem_record_fences(dev, error); |
| 829 | 810 | ||
| 830 | bbaddr = i915_ringbuffer_last_batch(dev, &dev_priv->ring[RCS]); | 811 | /* Record the active batchbuffers */ |
| 831 | 812 | for (i = 0; i < I915_NUM_RINGS; i++) | |
| 832 | /* Grab the current batchbuffer, most likely to have crashed. */ | 813 | error->batchbuffer[i] = |
| 833 | batchbuffer[0] = NULL; | 814 | i915_error_first_batchbuffer(dev_priv, |
| 834 | batchbuffer[1] = NULL; | 815 | &dev_priv->ring[i]); |
| 835 | count = 0; | ||
| 836 | list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) { | ||
| 837 | if (batchbuffer[0] == NULL && | ||
| 838 | bbaddr >= obj->gtt_offset && | ||
| 839 | bbaddr < obj->gtt_offset + obj->base.size) | ||
| 840 | batchbuffer[0] = obj; | ||
| 841 | |||
| 842 | if (batchbuffer[1] == NULL && | ||
| 843 | error->acthd >= obj->gtt_offset && | ||
| 844 | error->acthd < obj->gtt_offset + obj->base.size) | ||
| 845 | batchbuffer[1] = obj; | ||
| 846 | |||
| 847 | count++; | ||
| 848 | } | ||
| 849 | /* Scan the other lists for completeness for those bizarre errors. */ | ||
| 850 | if (batchbuffer[0] == NULL || batchbuffer[1] == NULL) { | ||
| 851 | list_for_each_entry(obj, &dev_priv->mm.flushing_list, mm_list) { | ||
| 852 | if (batchbuffer[0] == NULL && | ||
| 853 | bbaddr >= obj->gtt_offset && | ||
| 854 | bbaddr < obj->gtt_offset + obj->base.size) | ||
| 855 | batchbuffer[0] = obj; | ||
| 856 | |||
| 857 | if (batchbuffer[1] == NULL && | ||
| 858 | error->acthd >= obj->gtt_offset && | ||
| 859 | error->acthd < obj->gtt_offset + obj->base.size) | ||
| 860 | batchbuffer[1] = obj; | ||
| 861 | |||
| 862 | if (batchbuffer[0] && batchbuffer[1]) | ||
| 863 | break; | ||
| 864 | } | ||
| 865 | } | ||
| 866 | if (batchbuffer[0] == NULL || batchbuffer[1] == NULL) { | ||
| 867 | list_for_each_entry(obj, &dev_priv->mm.inactive_list, mm_list) { | ||
| 868 | if (batchbuffer[0] == NULL && | ||
| 869 | bbaddr >= obj->gtt_offset && | ||
| 870 | bbaddr < obj->gtt_offset + obj->base.size) | ||
| 871 | batchbuffer[0] = obj; | ||
| 872 | |||
| 873 | if (batchbuffer[1] == NULL && | ||
| 874 | error->acthd >= obj->gtt_offset && | ||
| 875 | error->acthd < obj->gtt_offset + obj->base.size) | ||
| 876 | batchbuffer[1] = obj; | ||
| 877 | |||
| 878 | if (batchbuffer[0] && batchbuffer[1]) | ||
| 879 | break; | ||
| 880 | } | ||
| 881 | } | ||
| 882 | |||
| 883 | /* We need to copy these to an anonymous buffer as the simplest | ||
| 884 | * method to avoid being overwritten by userspace. | ||
| 885 | */ | ||
| 886 | error->batchbuffer[0] = i915_error_object_create(dev, batchbuffer[0]); | ||
| 887 | if (batchbuffer[1] != batchbuffer[0]) | ||
| 888 | error->batchbuffer[1] = i915_error_object_create(dev, batchbuffer[1]); | ||
| 889 | else | ||
| 890 | error->batchbuffer[1] = NULL; | ||
| 891 | 816 | ||
| 892 | /* Record the ringbuffer */ | 817 | /* Record the ringbuffer */ |
| 893 | error->ringbuffer = i915_error_object_create(dev, | 818 | error->ringbuffer = i915_error_object_create(dev_priv, |
| 894 | dev_priv->ring[RCS].obj); | 819 | dev_priv->ring[RCS].obj); |
| 895 | 820 | ||
| 896 | /* Record buffers on the active and pinned lists. */ | 821 | /* Record buffers on the active and pinned lists. */ |
| 897 | error->active_bo = NULL; | 822 | error->active_bo = NULL; |
| 898 | error->pinned_bo = NULL; | 823 | error->pinned_bo = NULL; |
| 899 | 824 | ||
| 900 | error->active_bo_count = count; | 825 | i = 0; |
| 826 | list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) | ||
| 827 | i++; | ||
| 828 | error->active_bo_count = i; | ||
| 901 | list_for_each_entry(obj, &dev_priv->mm.pinned_list, mm_list) | 829 | list_for_each_entry(obj, &dev_priv->mm.pinned_list, mm_list) |
| 902 | count++; | 830 | i++; |
| 903 | error->pinned_bo_count = count - error->active_bo_count; | 831 | error->pinned_bo_count = i - error->active_bo_count; |
| 904 | 832 | ||
| 905 | if (count) { | 833 | if (i) { |
| 906 | error->active_bo = kmalloc(sizeof(*error->active_bo)*count, | 834 | error->active_bo = kmalloc(sizeof(*error->active_bo)*i, |
| 907 | GFP_ATOMIC); | 835 | GFP_ATOMIC); |
| 908 | if (error->active_bo) | 836 | if (error->active_bo) |
| 909 | error->pinned_bo = | 837 | error->pinned_bo = |
