Diffstat (limited to 'drivers/gpu/drm/i915/i915_gem_execbuffer.c')
 drivers/gpu/drm/i915/i915_gem_execbuffer.c | 88 ++++++++++++++++++-----------
 1 file changed, 52 insertions(+), 36 deletions(-)
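
Note: read as a whole, the patch appears to do three related things: it hoists the __EXEC_OBJECT_HAS_PIN / __EXEC_OBJECT_HAS_FENCE flag bits and the i915_gem_execbuffer_unreserve_vma() helper to the top of the file, it reworks eb_lookup_vmas() so that object references are handed over to the eb->vmas list as each lookup succeeds, and it stops i915_gem_execbuffer_reserve() from dropping pin counts on every exit, deferring that to eb_destroy() and the slow relocation path instead.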
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 885d595e0e02..a3ba9a8cd687 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -33,6 +33,9 @@
33#include "intel_drv.h" 33#include "intel_drv.h"
34#include <linux/dma_remapping.h> 34#include <linux/dma_remapping.h>
35 35
36#define __EXEC_OBJECT_HAS_PIN (1<<31)
37#define __EXEC_OBJECT_HAS_FENCE (1<<30)
38
36struct eb_vmas { 39struct eb_vmas {
37 struct list_head vmas; 40 struct list_head vmas;
38 int and; 41 int and;
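
Note: the two flag bits are pure code motion; the hunk at @@ -478,9 below removes them from their old spot. They have to be defined this early because i915_gem_execbuffer_unreserve_vma(), which tests them, is itself moved up in front of eb_destroy() by this patch.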
@@ -90,7 +93,7 @@ eb_lookup_vmas(struct eb_vmas *eb,
 {
 	struct drm_i915_gem_object *obj;
 	struct list_head objects;
-	int i, ret = 0;
+	int i, ret;
 
 	INIT_LIST_HEAD(&objects);
 	spin_lock(&file->table_lock);
@@ -103,7 +106,7 @@ eb_lookup_vmas(struct eb_vmas *eb,
103 DRM_DEBUG("Invalid object handle %d at index %d\n", 106 DRM_DEBUG("Invalid object handle %d at index %d\n",
104 exec[i].handle, i); 107 exec[i].handle, i);
105 ret = -ENOENT; 108 ret = -ENOENT;
106 goto out; 109 goto err;
107 } 110 }
108 111
109 if (!list_empty(&obj->obj_exec_link)) { 112 if (!list_empty(&obj->obj_exec_link)) {
@@ -111,7 +114,7 @@ eb_lookup_vmas(struct eb_vmas *eb,
111 DRM_DEBUG("Object %p [handle %d, index %d] appears more than once in object list\n", 114 DRM_DEBUG("Object %p [handle %d, index %d] appears more than once in object list\n",
112 obj, exec[i].handle, i); 115 obj, exec[i].handle, i);
113 ret = -EINVAL; 116 ret = -EINVAL;
114 goto out; 117 goto err;
115 } 118 }
116 119
117 drm_gem_object_reference(&obj->base); 120 drm_gem_object_reference(&obj->base);
@@ -120,9 +123,13 @@ eb_lookup_vmas(struct eb_vmas *eb,
120 spin_unlock(&file->table_lock); 123 spin_unlock(&file->table_lock);
121 124
122 i = 0; 125 i = 0;
123 list_for_each_entry(obj, &objects, obj_exec_link) { 126 while (!list_empty(&objects)) {
124 struct i915_vma *vma; 127 struct i915_vma *vma;
125 128
129 obj = list_first_entry(&objects,
130 struct drm_i915_gem_object,
131 obj_exec_link);
132
126 /* 133 /*
127 * NOTE: We can leak any vmas created here when something fails 134 * NOTE: We can leak any vmas created here when something fails
128 * later on. But that's no issue since vma_unbind can deal with 135 * later on. But that's no issue since vma_unbind can deal with
@@ -135,10 +142,12 @@ eb_lookup_vmas(struct eb_vmas *eb,
 		if (IS_ERR(vma)) {
 			DRM_DEBUG("Failed to lookup VMA\n");
 			ret = PTR_ERR(vma);
-			goto out;
+			goto err;
 		}
 
+		/* Transfer ownership from the objects list to the vmas list. */
 		list_add_tail(&vma->exec_list, &eb->vmas);
+		list_del_init(&obj->obj_exec_link);
 
 		vma->exec_entry = &exec[i];
 		if (eb->and < 0) {
@@ -152,16 +161,22 @@ eb_lookup_vmas(struct eb_vmas *eb,
 		++i;
 	}
 
+	return 0;
 
-out:
+
+err:
 	while (!list_empty(&objects)) {
 		obj = list_first_entry(&objects,
 				       struct drm_i915_gem_object,
 				       obj_exec_link);
 		list_del_init(&obj->obj_exec_link);
-		if (ret)
-			drm_gem_object_unreference(&obj->base);
+		drm_gem_object_unreference(&obj->base);
 	}
+	/*
+	 * Objects already transferred to the vmas list will be unreferenced by
+	 * eb_destroy.
+	 */
+
 	return ret;
 }
 
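
Note: the success case now returns 0 directly instead of falling through the cleanup loop, and the err: path drops the old if (ret) test: it unconditionally unreferences whatever is still on the local objects list. Objects that already made it onto eb->vmas are no longer on that list (see the list_del_init() in the previous hunk), so their references are owned by the eb and released later by eb_destroy(), as the new comment says.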
@@ -187,7 +202,28 @@ static struct i915_vma *eb_get_vma(struct eb_vmas *eb, unsigned long handle)
 	}
 }
 
-static void eb_destroy(struct eb_vmas *eb) {
+static void
+i915_gem_execbuffer_unreserve_vma(struct i915_vma *vma)
+{
+	struct drm_i915_gem_exec_object2 *entry;
+	struct drm_i915_gem_object *obj = vma->obj;
+
+	if (!drm_mm_node_allocated(&vma->node))
+		return;
+
+	entry = vma->exec_entry;
+
+	if (entry->flags & __EXEC_OBJECT_HAS_FENCE)
+		i915_gem_object_unpin_fence(obj);
+
+	if (entry->flags & __EXEC_OBJECT_HAS_PIN)
+		i915_gem_object_unpin(obj);
+
+	entry->flags &= ~(__EXEC_OBJECT_HAS_FENCE | __EXEC_OBJECT_HAS_PIN);
+}
+
+static void eb_destroy(struct eb_vmas *eb)
+{
 	while (!list_empty(&eb->vmas)) {
 		struct i915_vma *vma;
 
@@ -195,6 +231,7 @@ static void eb_destroy(struct eb_vmas *eb) {
 				       struct i915_vma,
 				       exec_list);
 		list_del_init(&vma->exec_list);
+		i915_gem_execbuffer_unreserve_vma(vma);
 		drm_gem_object_unreference(&vma->obj->base);
 	}
 	kfree(eb);
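
Note: the helper body is unchanged; it is moved above eb_destroy() so the static function is defined before its new call site, saving a forward declaration (the matching deletion is in the @@ -552,26 hunk below). With unreserve wired into eb_destroy(), teardown now releases resources in reverse order of acquisition. A condensed sketch of that ordering (simplified from the hunk above, not verbatim):

	/*
	 * Teardown order: unlink, then drop the pin/fence reservation,
	 * then drop the object reference.  The reference must go last;
	 * if it were the final one, freeing the object while it still
	 * held a pin count would be a bug.
	 */
	list_del_init(&vma->exec_list);			/* 1. unlink */
	i915_gem_execbuffer_unreserve_vma(vma);		/* 2. unpin  */
	drm_gem_object_unreference(&vma->obj->base);	/* 3. unref  */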
@@ -478,9 +515,6 @@ i915_gem_execbuffer_relocate(struct eb_vmas *eb,
 	return ret;
 }
 
-#define __EXEC_OBJECT_HAS_PIN (1<<31)
-#define __EXEC_OBJECT_HAS_FENCE (1<<30)
-
 static int
 need_reloc_mappable(struct i915_vma *vma)
 {
@@ -552,26 +586,6 @@ i915_gem_execbuffer_reserve_vma(struct i915_vma *vma,
 	return 0;
 }
 
-static void
-i915_gem_execbuffer_unreserve_vma(struct i915_vma *vma)
-{
-	struct drm_i915_gem_exec_object2 *entry;
-	struct drm_i915_gem_object *obj = vma->obj;
-
-	if (!drm_mm_node_allocated(&vma->node))
-		return;
-
-	entry = vma->exec_entry;
-
-	if (entry->flags & __EXEC_OBJECT_HAS_FENCE)
-		i915_gem_object_unpin_fence(obj);
-
-	if (entry->flags & __EXEC_OBJECT_HAS_PIN)
-		i915_gem_object_unpin(obj);
-
-	entry->flags &= ~(__EXEC_OBJECT_HAS_FENCE | __EXEC_OBJECT_HAS_PIN);
-}
-
 static int
 i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
 			    struct list_head *vmas,
@@ -670,13 +684,14 @@ i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
 			goto err;
 	}
 
-err:		/* Decrement pin count for bound objects */
-	list_for_each_entry(vma, vmas, exec_list)
-		i915_gem_execbuffer_unreserve_vma(vma);
-
+err:
 	if (ret != -ENOSPC || retry++)
 		return ret;
 
+	/* Decrement pin count for bound objects */
+	list_for_each_entry(vma, vmas, exec_list)
+		i915_gem_execbuffer_unreserve_vma(vma);
+
 	ret = i915_gem_evict_vm(vm, true);
 	if (ret)
 		return ret;
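
Note: this hunk is the behavioural core of the patch. The old code sat the unreserve loop directly under err:, so pin counts were dropped on every exit from the reserve loop, success included. Now buffers stay pinned on success (and error paths unwind through eb_destroy(), which unreserves); the pins are only given back explicitly when the function falls back to a full eviction and retry. A condensed sketch of the resulting control flow (simplified, with the arguments of i915_gem_execbuffer_reserve_vma() abridged; not the verbatim function body):

	retry = 0;
	do {
		ret = 0;
		list_for_each_entry(vma, vmas, exec_list) {
			/* bind, pin and possibly fence each buffer */
			ret = i915_gem_execbuffer_reserve_vma(vma, ...);
			if (ret)
				break;
		}

		if (ret != -ENOSPC || retry++)
			return ret;	/* 0 on success, or a hard error */

		/* Decrement pin count for bound objects ... */
		list_for_each_entry(vma, vmas, exec_list)
			i915_gem_execbuffer_unreserve_vma(vma);

		/* ... so a full eviction can make room for one retry. */
		ret = i915_gem_evict_vm(vm, true);
		if (ret)
			return ret;
	} while (1);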
@@ -708,6 +723,7 @@ i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
 	while (!list_empty(&eb->vmas)) {
 		vma = list_first_entry(&eb->vmas, struct i915_vma, exec_list);
 		list_del_init(&vma->exec_list);
+		i915_gem_execbuffer_unreserve_vma(vma);
 		drm_gem_object_unreference(&vma->obj->base);
 	}
 
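
Note: the slow relocation path throws away its whole lookup in order to rebuild it after copying the user relocations, so it needs the same treatment as eb_destroy(): unreserve each vma before dropping the reference. Without this, the pin counts held by the now-persistent reservations would leak every time execbuf took the slow path.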