about summary refs log tree commit diff stats
diff options
context:
space:
mode:
-rw-r--r--drivers/gpu/drm/i915/i915_drv.h8
-rw-r--r--drivers/gpu/drm/i915/i915_gem.c29
-rw-r--r--drivers/gpu/drm/i915/i915_gem_debug.c111
-rw-r--r--drivers/gpu/drm/i915/intel_ringbuffer.c4
4 files changed, 113 insertions, 39 deletions
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index cb4e9a63c835..5ccf98095389 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -77,7 +77,7 @@ enum plane {
77#define WATCH_COHERENCY 0 77#define WATCH_COHERENCY 0
78#define WATCH_EXEC 0 78#define WATCH_EXEC 0
79#define WATCH_RELOC 0 79#define WATCH_RELOC 0
80#define WATCH_INACTIVE 0 80#define WATCH_LISTS 0
81#define WATCH_PWRITE 0 81#define WATCH_PWRITE 0
82 82
83#define I915_GEM_PHYS_CURSOR_0 1 83#define I915_GEM_PHYS_CURSOR_0 1
@@ -1079,10 +1079,10 @@ bool i915_gem_object_fence_offset_ok(struct drm_gem_object *obj,
1079/* i915_gem_debug.c */ 1079/* i915_gem_debug.c */
1080void i915_gem_dump_object(struct drm_gem_object *obj, int len, 1080void i915_gem_dump_object(struct drm_gem_object *obj, int len,
1081 const char *where, uint32_t mark); 1081 const char *where, uint32_t mark);
1082#if WATCH_INACTIVE 1082#if WATCH_LISTS
1083void i915_verify_inactive(struct drm_device *dev, char *file, int line); 1083int i915_verify_lists(struct drm_device *dev);
1084#else 1084#else
1085#define i915_verify_inactive(dev, file, line) 1085#define i915_verify_lists(dev) 0
1086#endif 1086#endif
1087void i915_gem_object_check_coherency(struct drm_gem_object *obj, int handle); 1087void i915_gem_object_check_coherency(struct drm_gem_object *obj, int handle);
1088void i915_gem_dump_object(struct drm_gem_object *obj, int len, 1088void i915_gem_dump_object(struct drm_gem_object *obj, int len,
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index fe1424c6c3fa..c3a7065947ce 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -109,6 +109,7 @@ static int i915_mutex_lock_interruptible(struct drm_device *dev)
109 return -EAGAIN; 109 return -EAGAIN;
110 } 110 }
111 111
112 WARN_ON(i915_verify_lists(dev));
112 return 0; 113 return 0;
113} 114}
114 115
@@ -1612,7 +1613,6 @@ i915_gem_object_move_to_inactive(struct drm_gem_object *obj)
1612 drm_i915_private_t *dev_priv = dev->dev_private; 1613 drm_i915_private_t *dev_priv = dev->dev_private;
1613 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); 1614 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
1614 1615
1615 i915_verify_inactive(dev, __FILE__, __LINE__);
1616 if (obj_priv->pin_count != 0) 1616 if (obj_priv->pin_count != 0)
1617 list_move_tail(&obj_priv->list, &dev_priv->mm.pinned_list); 1617 list_move_tail(&obj_priv->list, &dev_priv->mm.pinned_list);
1618 else 1618 else
@@ -1626,7 +1626,7 @@ i915_gem_object_move_to_inactive(struct drm_gem_object *obj)
1626 obj_priv->active = 0; 1626 obj_priv->active = 0;
1627 drm_gem_object_unreference(obj); 1627 drm_gem_object_unreference(obj);
1628 } 1628 }
1629 i915_verify_inactive(dev, __FILE__, __LINE__); 1629 WARN_ON(i915_verify_lists(dev));
1630} 1630}
1631 1631
1632static void 1632static void
@@ -1821,6 +1821,8 @@ i915_gem_retire_requests_ring(struct drm_device *dev,
1821 list_empty(&ring->request_list)) 1821 list_empty(&ring->request_list))
1822 return; 1822 return;
1823 1823
1824 WARN_ON(i915_verify_lists(dev));
1825
1824 seqno = ring->get_seqno(dev, ring); 1826 seqno = ring->get_seqno(dev, ring);
1825 while (!list_empty(&ring->request_list)) { 1827 while (!list_empty(&ring->request_list)) {
1826 struct drm_i915_gem_request *request; 1828 struct drm_i915_gem_request *request;
@@ -1865,6 +1867,8 @@ i915_gem_retire_requests_ring(struct drm_device *dev,
1865 ring->user_irq_put(dev, ring); 1867 ring->user_irq_put(dev, ring);
1866 dev_priv->trace_irq_seqno = 0; 1868 dev_priv->trace_irq_seqno = 0;
1867 } 1869 }
1870
1871 WARN_ON(i915_verify_lists(dev));
1868} 1872}
1869 1873
1870void 1874void
@@ -3690,8 +3694,6 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
3690 if (ret) 3694 if (ret)
3691 goto pre_mutex_err; 3695 goto pre_mutex_err;
3692 3696
3693 i915_verify_inactive(dev, __FILE__, __LINE__);
3694
3695 if (dev_priv->mm.suspended) { 3697 if (dev_priv->mm.suspended) {
3696 mutex_unlock(&dev->struct_mutex); 3698 mutex_unlock(&dev->struct_mutex);
3697 ret = -EBUSY; 3699 ret = -EBUSY;
@@ -3811,8 +3813,6 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
3811 goto err; 3813 goto err;
3812 } 3814 }
3813 3815
3814 i915_verify_inactive(dev, __FILE__, __LINE__);
3815
3816 /* Zero the global flush/invalidate flags. These 3816 /* Zero the global flush/invalidate flags. These
3817 * will be modified as new domains are computed 3817 * will be modified as new domains are computed
3818 * for each object 3818 * for each object
@@ -3828,8 +3828,6 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
3828 i915_gem_object_set_to_gpu_domain(obj); 3828 i915_gem_object_set_to_gpu_domain(obj);
3829 } 3829 }
3830 3830
3831 i915_verify_inactive(dev, __FILE__, __LINE__);
3832
3833 if (dev->invalidate_domains | dev->flush_domains) { 3831 if (dev->invalidate_domains | dev->flush_domains) {
3834#if WATCH_EXEC 3832#if WATCH_EXEC
3835 DRM_INFO("%s: invalidate_domains %08x flush_domains %08x\n", 3833 DRM_INFO("%s: invalidate_domains %08x flush_domains %08x\n",
@@ -3860,8 +3858,6 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
3860 old_write_domain); 3858 old_write_domain);
3861 } 3859 }
3862 3860
3863 i915_verify_inactive(dev, __FILE__, __LINE__);
3864
3865#if WATCH_COHERENCY 3861#if WATCH_COHERENCY
3866 for (i = 0; i < args->buffer_count; i++) { 3862 for (i = 0; i < args->buffer_count; i++) {
3867 i915_gem_object_check_coherency(object_list[i], 3863 i915_gem_object_check_coherency(object_list[i],
@@ -3890,8 +3886,6 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
3890 */ 3886 */
3891 i915_retire_commands(dev, ring); 3887 i915_retire_commands(dev, ring);
3892 3888
3893 i915_verify_inactive(dev, __FILE__, __LINE__);
3894
3895 for (i = 0; i < args->buffer_count; i++) { 3889 for (i = 0; i < args->buffer_count; i++) {
3896 struct drm_gem_object *obj = object_list[i]; 3890 struct drm_gem_object *obj = object_list[i];
3897 obj_priv = to_intel_bo(obj); 3891 obj_priv = to_intel_bo(obj);
@@ -3902,8 +3896,6 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
3902 i915_add_request(dev, file_priv, request, ring); 3896 i915_add_request(dev, file_priv, request, ring);
3903 request = NULL; 3897 request = NULL;
3904 3898
3905 i915_verify_inactive(dev, __FILE__, __LINE__);
3906
3907err: 3899err:
3908 for (i = 0; i < pinned; i++) 3900 for (i = 0; i < pinned; i++)
3909 i915_gem_object_unpin(object_list[i]); 3901 i915_gem_object_unpin(object_list[i]);
@@ -4094,8 +4086,7 @@ i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment)
4094 int ret; 4086 int ret;
4095 4087
4096 BUG_ON(obj_priv->pin_count == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT); 4088 BUG_ON(obj_priv->pin_count == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT);
4097 4089 WARN_ON(i915_verify_lists(dev));
4098 i915_verify_inactive(dev, __FILE__, __LINE__);
4099 4090
4100 if (obj_priv->gtt_space != NULL) { 4091 if (obj_priv->gtt_space != NULL) {
4101 if (alignment == 0) 4092 if (alignment == 0)
@@ -4129,8 +4120,8 @@ i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment)
4129 list_move_tail(&obj_priv->list, 4120 list_move_tail(&obj_priv->list,
4130 &dev_priv->mm.pinned_list); 4121 &dev_priv->mm.pinned_list);
4131 } 4122 }
4132 i915_verify_inactive(dev, __FILE__, __LINE__);
4133 4123
4124 WARN_ON(i915_verify_lists(dev));
4134 return 0; 4125 return 0;
4135} 4126}
4136 4127
@@ -4141,7 +4132,7 @@ i915_gem_object_unpin(struct drm_gem_object *obj)
4141 drm_i915_private_t *dev_priv = dev->dev_private; 4132 drm_i915_private_t *dev_priv = dev->dev_private;
4142 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); 4133 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
4143 4134
4144 i915_verify_inactive(dev, __FILE__, __LINE__); 4135 WARN_ON(i915_verify_lists(dev));
4145 obj_priv->pin_count--; 4136 obj_priv->pin_count--;
4146 BUG_ON(obj_priv->pin_count < 0); 4137 BUG_ON(obj_priv->pin_count < 0);
4147 BUG_ON(obj_priv->gtt_space == NULL); 4138 BUG_ON(obj_priv->gtt_space == NULL);
@@ -4157,7 +4148,7 @@ i915_gem_object_unpin(struct drm_gem_object *obj)
4157 atomic_dec(&dev->pin_count); 4148 atomic_dec(&dev->pin_count);
4158 atomic_sub(obj->size, &dev->pin_memory); 4149 atomic_sub(obj->size, &dev->pin_memory);
4159 } 4150 }
4160 i915_verify_inactive(dev, __FILE__, __LINE__); 4151 WARN_ON(i915_verify_lists(dev));
4161} 4152}
4162 4153
4163int 4154int
diff --git a/drivers/gpu/drm/i915/i915_gem_debug.c b/drivers/gpu/drm/i915/i915_gem_debug.c
index 2732c909a948..48644b840a8d 100644
--- a/drivers/gpu/drm/i915/i915_gem_debug.c
+++ b/drivers/gpu/drm/i915/i915_gem_debug.c
@@ -30,24 +30,107 @@
30#include "i915_drm.h" 30#include "i915_drm.h"
31#include "i915_drv.h" 31#include "i915_drv.h"
32 32
33#if WATCH_INACTIVE 33#if WATCH_LISTS
34void 34int
35i915_verify_inactive(struct drm_device *dev, char *file, int line) 35i915_verify_lists(struct drm_device *dev)
36{ 36{
37 static int warned;
37 drm_i915_private_t *dev_priv = dev->dev_private; 38 drm_i915_private_t *dev_priv = dev->dev_private;
38 struct drm_gem_object *obj; 39 struct drm_i915_gem_object *obj;
39 struct drm_i915_gem_object *obj_priv; 40 int err = 0;
40 41
41 list_for_each_entry(obj_priv, &dev_priv->mm.inactive_list, list) { 42 if (warned)
42 obj = &obj_priv->base; 43 return 0;
43 if (obj_priv->pin_count || obj_priv->active || 44
44 (obj->write_domain & ~(I915_GEM_DOMAIN_CPU | 45 list_for_each_entry(obj, &dev_priv->render_ring.active_list, list) {
45 I915_GEM_DOMAIN_GTT))) 46 if (obj->base.dev != dev ||
46 DRM_ERROR("inactive %p (p %d a %d w %x) %s:%d\n", 47 !atomic_read(&obj->base.refcount.refcount)) {
48 DRM_ERROR("freed render active %p\n", obj);
49 err++;
50 break;
51 } else if (!obj->active ||
52 (obj->base.read_domains & I915_GEM_GPU_DOMAINS) == 0) {
53 DRM_ERROR("invalid render active %p (a %d r %x)\n",
54 obj,
55 obj->active,
56 obj->base.read_domains);
57 err++;
58 } else if (obj->base.write_domain && list_empty(&obj->gpu_write_list)) {
59 DRM_ERROR("invalid render active %p (w %x, gwl %d)\n",
60 obj,
61 obj->base.write_domain,
62 !list_empty(&obj->gpu_write_list));
63 err++;
64 }
65 }
66
67 list_for_each_entry(obj, &dev_priv->mm.flushing_list, list) {
68 if (obj->base.dev != dev ||
69 !atomic_read(&obj->base.refcount.refcount)) {
70 DRM_ERROR("freed flushing %p\n", obj);
71 err++;
72 break;
73 } else if (!obj->active ||
74 (obj->base.write_domain & I915_GEM_GPU_DOMAINS) == 0 ||
75 list_empty(&obj->gpu_write_list)){
76 DRM_ERROR("invalid flushing %p (a %d w %x gwl %d)\n",
77 obj,
78 obj->active,
79 obj->base.write_domain,
80 !list_empty(&obj->gpu_write_list));
81 err++;
82 }
83 }
84
85 list_for_each_entry(obj, &dev_priv->mm.gpu_write_list, gpu_write_list) {
86 if (obj->base.dev != dev ||
87 !atomic_read(&obj->base.refcount.refcount)) {
88 DRM_ERROR("freed gpu write %p\n", obj);
89 err++;
90 break;
91 } else if (!obj->active ||
92 (obj->base.write_domain & I915_GEM_GPU_DOMAINS) == 0) {
93 DRM_ERROR("invalid gpu write %p (a %d w %x)\n",
47 obj, 94 obj,
48 obj_priv->pin_count, obj_priv->active, 95 obj->active,
49 obj->write_domain, file, line); 96 obj->base.write_domain);
97 err++;
98 }
99 }
100
101 list_for_each_entry(obj, &dev_priv->mm.inactive_list, list) {
102 if (obj->base.dev != dev ||
103 !atomic_read(&obj->base.refcount.refcount)) {
104 DRM_ERROR("freed inactive %p\n", obj);
105 err++;
106 break;
107 } else if (obj->pin_count || obj->active ||
108 (obj->base.write_domain & I915_GEM_GPU_DOMAINS)) {
109 DRM_ERROR("invalid inactive %p (p %d a %d w %x)\n",
110 obj,
111 obj->pin_count, obj->active,
112 obj->base.write_domain);
113 err++;
114 }
50 } 115 }
116
117 list_for_each_entry(obj, &dev_priv->mm.pinned_list, list) {
118 if (obj->base.dev != dev ||
119 !atomic_read(&obj->base.refcount.refcount)) {
120 DRM_ERROR("freed pinned %p\n", obj);
121 err++;
122 break;
123 } else if (!obj->pin_count || obj->active ||
124 (obj->base.write_domain & I915_GEM_GPU_DOMAINS)) {
125 DRM_ERROR("invalid pinned %p (p %d a %d w %x)\n",
126 obj,
127 obj->pin_count, obj->active,
128 obj->base.write_domain);
129 err++;
130 }
131 }
132
133 return warned = err;
51} 134}
52#endif /* WATCH_INACTIVE */ 135#endif /* WATCH_INACTIVE */
53 136
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 5103b95cea93..d89b88791aac 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -579,6 +579,8 @@ int intel_init_ring_buffer(struct drm_device *dev,
579 int ret; 579 int ret;
580 580
581 ring->dev = dev; 581 ring->dev = dev;
582 INIT_LIST_HEAD(&ring->active_list);
583 INIT_LIST_HEAD(&ring->request_list);
582 584
583 if (I915_NEED_GFX_HWS(dev)) { 585 if (I915_NEED_GFX_HWS(dev)) {
584 ret = init_status_page(dev, ring); 586 ret = init_status_page(dev, ring);
@@ -627,8 +629,6 @@ int intel_init_ring_buffer(struct drm_device *dev,
627 if (ring->space < 0) 629 if (ring->space < 0)
628 ring->space += ring->size; 630 ring->space += ring->size;
629 } 631 }
630 INIT_LIST_HEAD(&ring->active_list);
631 INIT_LIST_HEAD(&ring->request_list);
632 return ret; 632 return ret;
633 633
634err_unmap: 634err_unmap: