author     Chris Wilson <chris@chris-wilson.co.uk>      2013-11-26 06:23:15 -0500
committer  Daniel Vetter <daniel.vetter@ffwll.ch>       2013-11-27 03:04:36 -0500
commit     a415d355645ca5e8797235a76026ca2622ceefdb (patch)
tree       75cf3f853d0b4f80ef4ae4e6e7332e3425aa4351 /drivers/gpu/drm/i915
parent     7c063c725987406d743cc7de7625ff224fab75de (diff)
drm/i915: Pin relocations for the duration of constructing the execbuffer
As the execbuffer dispatch grows ever more complex and involves multiple
stages of moving objects into the aperture, we need to take greater care
that we do not evict our execbuffer objects prior to dispatch. This is
relatively simple, as we can just keep the objects pinned not only for the
relocation but until we are finished. One such example is the possibility
of the context switch causing an eviction or hitting the shrinker in order
to fit its object into the aperture.

Link: http://lists.freedesktop.org/archives/intel-gfx/2013-November/036166.html
Reported-by: "Siluvery, Arun" <arun.siluvery@intel.com>
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Ben Widawsky <benjamin.widawsky@intel.com>
Cc: Daniel Vetter <daniel@ffwll.ch>
Cc: stable@vger.kernel.org
Acked-by: Ben Widawsky <ben@bwidawsk.net>
Tested-by: Ben Widawsky <ben@bwidawsk.net>
[danvet: Add the additional explanations from Chris to the commit message.]
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Diffstat (limited to 'drivers/gpu/drm/i915')
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_execbuffer.c  60
1 file changed, 32 insertions(+), 28 deletions(-)
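The patch boils down to one bookkeeping rule: record what reservation took
for each execbuffer object in per-exec flags (__EXEC_OBJECT_HAS_PIN,
__EXEC_OBJECT_HAS_FENCE) and release it exactly once, after dispatch,
rather than at the end of reservation. Below is a small standalone model
of that pattern, assuming simplified stand-in types; struct obj,
struct vma, reserve_vma() and unreserve_vma() here are invented for
illustration and are not the i915 API.

/* Standalone model of flag-guarded pin bookkeeping (hypothetical types). */
#include <assert.h>
#include <stdio.h>

#define HAS_PIN   (1u << 31)
#define HAS_FENCE (1u << 30)

struct obj { int pin_count; };
struct vma { struct obj *obj; unsigned int flags; int bound; };

/* Pin the backing object while it is part of the execbuf; record what
 * was taken in per-exec flags so cleanup knows exactly what to release. */
static void reserve_vma(struct vma *vma)
{
	vma->obj->pin_count++;
	vma->flags |= HAS_PIN;
	vma->bound = 1;
}

/* Drop whatever reserve_vma() took; calling it again is a no-op because
 * the flags are cleared on the first pass. */
static void unreserve_vma(struct vma *vma)
{
	if (!vma->bound)
		return;		/* never bound: nothing was pinned */
	if (vma->flags & HAS_PIN)
		vma->obj->pin_count--;
	vma->flags &= ~(HAS_PIN | HAS_FENCE);
}

int main(void)
{
	struct obj o = { 0 };
	struct vma v = { &o, 0, 0 };

	reserve_vma(&v);
	/* relocations, context switch, dispatch: object stays pinned */
	assert(o.pin_count == 1);
	unreserve_vma(&v);	/* deferred until after dispatch */
	unreserve_vma(&v);	/* second call is a no-op */
	assert(o.pin_count == 0);
	printf("pin released exactly once\n");
	return 0;
}

Because the helper clears the flags it acted on, the release is
idempotent; that is what lets both the -ENOSPC retry path in reserve and
the final cleanup in eb_destroy() call the same unreserve helper safely.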
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 885d595e0e02..b7e787fb4649 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -33,6 +33,9 @@
 #include "intel_drv.h"
 #include <linux/dma_remapping.h>
 
+#define __EXEC_OBJECT_HAS_PIN (1<<31)
+#define __EXEC_OBJECT_HAS_FENCE (1<<30)
+
 struct eb_vmas {
 	struct list_head vmas;
 	int and;
@@ -187,7 +190,28 @@ static struct i915_vma *eb_get_vma(struct eb_vmas *eb, unsigned long handle)
 	}
 }
 
-static void eb_destroy(struct eb_vmas *eb) {
+static void
+i915_gem_execbuffer_unreserve_vma(struct i915_vma *vma)
+{
+	struct drm_i915_gem_exec_object2 *entry;
+	struct drm_i915_gem_object *obj = vma->obj;
+
+	if (!drm_mm_node_allocated(&vma->node))
+		return;
+
+	entry = vma->exec_entry;
+
+	if (entry->flags & __EXEC_OBJECT_HAS_FENCE)
+		i915_gem_object_unpin_fence(obj);
+
+	if (entry->flags & __EXEC_OBJECT_HAS_PIN)
+		i915_gem_object_unpin(obj);
+
+	entry->flags &= ~(__EXEC_OBJECT_HAS_FENCE | __EXEC_OBJECT_HAS_PIN);
+}
+
+static void eb_destroy(struct eb_vmas *eb)
+{
 	while (!list_empty(&eb->vmas)) {
 		struct i915_vma *vma;
 
@@ -195,6 +219,7 @@ static void eb_destroy(struct eb_vmas *eb) {
 				       struct i915_vma,
 				       exec_list);
 		list_del_init(&vma->exec_list);
+		i915_gem_execbuffer_unreserve_vma(vma);
 		drm_gem_object_unreference(&vma->obj->base);
 	}
 	kfree(eb);
@@ -478,9 +503,6 @@ i915_gem_execbuffer_relocate(struct eb_vmas *eb,
 	return ret;
 }
 
-#define __EXEC_OBJECT_HAS_PIN (1<<31)
-#define __EXEC_OBJECT_HAS_FENCE (1<<30)
-
 static int
 need_reloc_mappable(struct i915_vma *vma)
 {
@@ -552,26 +574,6 @@ i915_gem_execbuffer_reserve_vma(struct i915_vma *vma,
 	return 0;
 }
 
-static void
-i915_gem_execbuffer_unreserve_vma(struct i915_vma *vma)
-{
-	struct drm_i915_gem_exec_object2 *entry;
-	struct drm_i915_gem_object *obj = vma->obj;
-
-	if (!drm_mm_node_allocated(&vma->node))
-		return;
-
-	entry = vma->exec_entry;
-
-	if (entry->flags & __EXEC_OBJECT_HAS_FENCE)
-		i915_gem_object_unpin_fence(obj);
-
-	if (entry->flags & __EXEC_OBJECT_HAS_PIN)
-		i915_gem_object_unpin(obj);
-
-	entry->flags &= ~(__EXEC_OBJECT_HAS_FENCE | __EXEC_OBJECT_HAS_PIN);
-}
-
 static int
 i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
 			    struct list_head *vmas,
@@ -670,13 +672,14 @@ i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
 			goto err;
 	}
 
-err:		/* Decrement pin count for bound objects */
-	list_for_each_entry(vma, vmas, exec_list)
-		i915_gem_execbuffer_unreserve_vma(vma);
-
+err:
 	if (ret != -ENOSPC || retry++)
 		return ret;
 
+	/* Decrement pin count for bound objects */
+	list_for_each_entry(vma, vmas, exec_list)
+		i915_gem_execbuffer_unreserve_vma(vma);
+
 	ret = i915_gem_evict_vm(vm, true);
 	if (ret)
 		return ret;
@@ -708,6 +711,7 @@ i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
 	while (!list_empty(&eb->vmas)) {
 		vma = list_first_entry(&eb->vmas, struct i915_vma, exec_list);
 		list_del_init(&vma->exec_list);
+		i915_gem_execbuffer_unreserve_vma(vma);
 		drm_gem_object_unreference(&vma->obj->base);
 	}
 