author     Ben Widawsky <benjamin.widawsky@intel.com>   2013-09-11 17:57:50 -0400
committer  Daniel Vetter <daniel.vetter@ffwll.ch>       2013-09-12 15:58:22 -0400
commit     68c8c17f527effba57f4e82efee18a249c6a1b58
tree       a05723acd6d127cbe0a10815775821eb4746582b
parent     7b7961220f1426aa795a3ded3404470b1c5749b6
drm/i915: evict VM instead of everything
When reserving objects during execbuf, it is possible to come across an
object which will not fit given the current fragmentation of the address
space. We do not have any defragmentation in drm_mm, so the strategy has
been to instead evict everything and reallocate objects. With the upcoming
addition of multiple VMs, there is no point in evicting everything, since
doing so is overkill for the specific case mentioned above.

Recommended-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Signed-off-by: Ben Widawsky <ben@bwidawsk.net>
[danvet: One additional s/evict_everything/evict_vm/ to update a comment in
the code.]
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.h             |  1
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_evict.c       | 19
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_execbuffer.c  |  8
3 files changed, 25 insertions(+), 3 deletions(-)
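The change itself is small: the execbuf reservation path keeps its evict-and-retry loop, but the eviction step now targets only the VM being reserved into. As a rough, self-contained illustration of that control flow (plain userspace C; the toy_vm type and the toy_* helpers are made up for this sketch and are not the i915 API):

/*
 * Toy model of the reserve -> evict-this-VM -> retry flow. Everything here
 * (toy_vm, toy_reserve, toy_evict_vm, toy_execbuf_reserve) is invented for
 * illustration only.
 */
#include <errno.h>
#include <stdio.h>

struct toy_vm {
	unsigned long free_space;	/* address space still unallocated */
	unsigned long evictable;	/* space that eviction could reclaim */
};

/* Pretend to bind one object; fail with -ENOSPC when the VM has no room. */
static int toy_reserve(struct toy_vm *vm, unsigned long size)
{
	if (size > vm->free_space)
		return -ENOSPC;
	vm->free_space -= size;
	return 0;
}

/* Per-VM eviction: reclaim only this VM's space, not every VM in the system. */
static int toy_evict_vm(struct toy_vm *vm)
{
	vm->free_space += vm->evictable;
	vm->evictable = 0;
	return 0;
}

/* Retry loop shaped like i915_gem_execbuffer_reserve(): on the first
 * -ENOSPC, evict the single address space involved and try once more. */
static int toy_execbuf_reserve(struct toy_vm *vm, unsigned long size)
{
	int retry = 0;

	do {
		int ret = toy_reserve(vm, size);

		if (ret != -ENOSPC || retry++)
			return ret;

		ret = toy_evict_vm(vm);
		if (ret)
			return ret;
	} while (1);
}

int main(void)
{
	struct toy_vm vm = { .free_space = 16, .evictable = 64 };

	/* First attempt fails, per-VM eviction frees space, the retry succeeds. */
	printf("reserve 32 -> %d\n", toy_execbuf_reserve(&vm, 32));
	return 0;
}

The point of the patch is the middle step: with multiple VMs, evicting only the address space that actually failed to fit avoids unbinding objects bound into every other VM in the system.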
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 81ba5bbc97fa..7caf71d52abe 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -2106,6 +2106,7 @@ int __must_check i915_gem_evict_something(struct drm_device *dev,
 					  unsigned cache_level,
 					  bool mappable,
 					  bool nonblock);
+int i915_gem_evict_vm(struct i915_address_space *vm, bool do_idle);
 int i915_gem_evict_everything(struct drm_device *dev);
 
 /* i915_gem_stolen.c */
diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c
index e9033f02f498..3a3981eb3012 100644
--- a/drivers/gpu/drm/i915/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/i915_gem_evict.c
@@ -116,7 +116,7 @@ none:
 	}
 
 	/* We expect the caller to unpin, evict all and try again, or give up.
-	 * So calling i915_gem_evict_everything() is unnecessary.
+	 * So calling i915_gem_evict_vm() is unnecessary.
 	 */
 	return -ENOSPC;
 
@@ -155,7 +155,22 @@ found:
 	return ret;
 }
 
-static int i915_gem_evict_vm(struct i915_address_space *vm, bool do_idle)
+/**
+ * i915_gem_evict_vm - Try to free up VM space
+ *
+ * @vm: Address space to evict from
+ * @do_idle: Boolean directing whether to idle first.
+ *
+ * VM eviction is about freeing up virtual address space. If one wants fine
+ * grained eviction, they should see evict something for more details. In terms
+ * of freeing up actual system memory, this function may not accomplish the
+ * desired result. An object may be shared in multiple address space, and this
+ * function will not assert those objects be freed.
+ *
+ * Using do_idle will result in a more complete eviction because it retires, and
+ * inactivates current BOs.
+ */
+int i915_gem_evict_vm(struct i915_address_space *vm, bool do_idle)
 {
 	struct i915_vma *vma, *next;
 	int ret;
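The new kerneldoc above spells out the do_idle semantics. For illustration only, a kernel-style sketch of how a caller might use the newly exported helper; the function example_flush_vm and its surrounding context are hypothetical and not part of this patch:

#include "i915_drv.h"

/*
 * Hypothetical in-tree caller, for illustration only. Assumes the usual
 * context for this path: dev->struct_mutex is already held.
 */
static int example_flush_vm(struct i915_address_space *vm)
{
	/*
	 * do_idle = true: retire outstanding work first so active buffers
	 * drop to the inactive list and can be unbound as well, giving the
	 * most complete per-VM eviction the kerneldoc describes.
	 */
	return i915_gem_evict_vm(vm, true);
}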
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index c8a01c141644..ee933572bdc1 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -549,10 +549,16 @@ i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
 {
 	struct drm_i915_gem_object *obj;
 	struct i915_vma *vma;
+	struct i915_address_space *vm;
 	struct list_head ordered_vmas;
 	bool has_fenced_gpu_access = INTEL_INFO(ring->dev)->gen < 4;
 	int retry;
 
+	if (list_empty(vmas))
+		return 0;
+
+	vm = list_first_entry(vmas, struct i915_vma, exec_list)->vm;
+
 	INIT_LIST_HEAD(&ordered_vmas);
 	while (!list_empty(vmas)) {
 		struct drm_i915_gem_exec_object2 *entry;
@@ -641,7 +647,7 @@ err: /* Decrement pin count for bound objects */
 		if (ret != -ENOSPC || retry++)
 			return ret;
 
-		ret = i915_gem_evict_everything(ring->dev);
+		ret = i915_gem_evict_vm(vm, true);
 		if (ret)
 			return ret;
 	} while (1);