aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
author	Chris Wilson <chris@chris-wilson.co.uk>	2013-01-08 05:53:15 -0500
committer	Daniel Vetter <daniel.vetter@ffwll.ch>	2013-01-17 16:08:02 -0500
commit	bcffc3faa692d6b2ef734e4f0c8f097175284db6 (patch)
tree	84633b4990a2e826b5cf7c8dcd745b658318c8cd
parent	3b96eff447b4ca34ca8ccd42e2651be2955f34b4 (diff)
drm/i915: Move the execbuffer objects list from the stack into the tracker
Instead of passing around the eb-objects hashtable and a separate object
list, we can include the object list into the eb-objects structure for
convenience.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Imre Deak <imre.deak@intel.com>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
-rw-r--r--	drivers/gpu/drm/i915/i915_gem_execbuffer.c	58
1 file changed, 27 insertions(+), 31 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index da103c179e3f..386677f8fd38 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -34,6 +34,7 @@
 #include <linux/dma_remapping.h>
 
 struct eb_objects {
+	struct list_head objects;
 	int and;
 	struct hlist_head buckets[0];
 };
@@ -53,6 +54,7 @@ eb_create(int size)
 		return eb;
 
 	eb->and = count - 1;
+	INIT_LIST_HEAD(&eb->objects);
 	return eb;
 }
 
@@ -73,8 +75,7 @@ static int
 eb_lookup_objects(struct eb_objects *eb,
 		  struct drm_i915_gem_exec_object2 *exec,
 		  int count,
-		  struct drm_file *file,
-		  struct list_head *objects)
+		  struct drm_file *file)
 {
 	int i;
 
@@ -98,7 +99,7 @@ eb_lookup_objects(struct eb_objects *eb,
 	}
 
 	drm_gem_object_reference(&obj->base);
-	list_add_tail(&obj->exec_list, objects);
+	list_add_tail(&obj->exec_list, &eb->objects);
 
 	obj->exec_handle = exec[i].handle;
 	obj->exec_entry = &exec[i];
@@ -129,6 +130,15 @@ eb_get_object(struct eb_objects *eb, unsigned long handle)
 static void
 eb_destroy(struct eb_objects *eb)
 {
+	while (!list_empty(&eb->objects)) {
+		struct drm_i915_gem_object *obj;
+
+		obj = list_first_entry(&eb->objects,
+				       struct drm_i915_gem_object,
+				       exec_list);
+		list_del_init(&obj->exec_list);
+		drm_gem_object_unreference(&obj->base);
+	}
 	kfree(eb);
 }
 
@@ -328,8 +338,7 @@ i915_gem_execbuffer_relocate_object_slow(struct drm_i915_gem_object *obj,
 
 static int
 i915_gem_execbuffer_relocate(struct drm_device *dev,
-			     struct eb_objects *eb,
-			     struct list_head *objects)
+			     struct eb_objects *eb)
 {
 	struct drm_i915_gem_object *obj;
 	int ret = 0;
@@ -342,7 +351,7 @@ i915_gem_execbuffer_relocate(struct drm_device *dev,
 	 * lockdep complains vehemently.
 	 */
 	pagefault_disable();
-	list_for_each_entry(obj, objects, exec_list) {
+	list_for_each_entry(obj, &eb->objects, exec_list) {
 		ret = i915_gem_execbuffer_relocate_object(obj, eb);
 		if (ret)
 			break;
@@ -531,7 +540,6 @@ static int
 i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
 				  struct drm_file *file,
 				  struct intel_ring_buffer *ring,
-				  struct list_head *objects,
 				  struct eb_objects *eb,
 				  struct drm_i915_gem_exec_object2 *exec,
 				  int count)
@@ -542,8 +550,8 @@ i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
 	int i, total, ret;
 
 	/* We may process another execbuffer during the unlock... */
-	while (!list_empty(objects)) {
-		obj = list_first_entry(objects,
+	while (!list_empty(&eb->objects)) {
+		obj = list_first_entry(&eb->objects,
 				       struct drm_i915_gem_object,
 				       exec_list);
 		list_del_init(&obj->exec_list);
@@ -590,15 +598,15 @@ i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
 
 	/* reacquire the objects */
 	eb_reset(eb);
-	ret = eb_lookup_objects(eb, exec, count, file, objects);
+	ret = eb_lookup_objects(eb, exec, count, file);
 	if (ret)
 		goto err;
 
-	ret = i915_gem_execbuffer_reserve(ring, file, objects);
+	ret = i915_gem_execbuffer_reserve(ring, file, &eb->objects);
 	if (ret)
 		goto err;
 
-	list_for_each_entry(obj, objects, exec_list) {
+	list_for_each_entry(obj, &eb->objects, exec_list) {
 		int offset = obj->exec_entry - exec;
 		ret = i915_gem_execbuffer_relocate_object_slow(obj, eb,
 							       reloc + reloc_offset[offset]);
@@ -756,7 +764,6 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 		       struct drm_i915_gem_exec_object2 *exec)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
-	struct list_head objects;
 	struct eb_objects *eb;
 	struct drm_i915_gem_object *batch_obj;
 	struct drm_clip_rect *cliprects = NULL;
@@ -899,28 +906,26 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 	}
 
 	/* Look up object handles */
-	INIT_LIST_HEAD(&objects);
-	ret = eb_lookup_objects(eb, exec, args->buffer_count, file, &objects);
+	ret = eb_lookup_objects(eb, exec, args->buffer_count, file);
 	if (ret)
 		goto err;
 
 	/* take note of the batch buffer before we might reorder the lists */
-	batch_obj = list_entry(objects.prev,
+	batch_obj = list_entry(eb->objects.prev,
 			       struct drm_i915_gem_object,
 			       exec_list);
 
 	/* Move the objects en-masse into the GTT, evicting if necessary. */
-	ret = i915_gem_execbuffer_reserve(ring, file, &objects);
+	ret = i915_gem_execbuffer_reserve(ring, file, &eb->objects);
 	if (ret)
 		goto err;
 
 	/* The objects are in their final locations, apply the relocations. */
-	ret = i915_gem_execbuffer_relocate(dev, eb, &objects);
+	ret = i915_gem_execbuffer_relocate(dev, eb);
 	if (ret) {
 		if (ret == -EFAULT) {
 			ret = i915_gem_execbuffer_relocate_slow(dev, file, ring,
-								&objects, eb,
-								exec,
+								eb, exec,
 								args->buffer_count);
 			BUG_ON(!mutex_is_locked(&dev->struct_mutex));
 		}
@@ -943,7 +948,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 	if (flags & I915_DISPATCH_SECURE && !batch_obj->has_global_gtt_mapping)
 		i915_gem_gtt_bind_object(batch_obj, batch_obj->cache_level);
 
-	ret = i915_gem_execbuffer_move_to_gpu(ring, &objects);
+	ret = i915_gem_execbuffer_move_to_gpu(ring, &eb->objects);
 	if (ret)
 		goto err;
 
@@ -997,20 +1002,11 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 
 	trace_i915_gem_ring_dispatch(ring, intel_ring_get_seqno(ring), flags);
 
-	i915_gem_execbuffer_move_to_active(&objects, ring);
+	i915_gem_execbuffer_move_to_active(&eb->objects, ring);
 	i915_gem_execbuffer_retire_commands(dev, file, ring);
 
 err:
 	eb_destroy(eb);
-	while (!list_empty(&objects)) {
-		struct drm_i915_gem_object *obj;
-
-		obj = list_first_entry(&objects,
-				       struct drm_i915_gem_object,
-				       exec_list);
-		list_del_init(&obj->exec_list);
-		drm_gem_object_unreference(&obj->base);
-	}
 
 	mutex_unlock(&dev->struct_mutex);
 