author    Chris Wilson <chris@chris-wilson.co.uk>    2014-03-21 03:40:56 -0400
committer Daniel Vetter <daniel.vetter@ffwll.ch>    2014-03-21 11:13:08 -0400
commit    df6f783a4ef6790780a67c491897ac45c4618735 (patch)
tree      ef6e84fa4126674c02770514ffdf7a8c9f9b20e6
parent    4726e0b045b80c514377da35ca01467ef6a4de53 (diff)
drm/i915: Fix unsafe loop iteration over vma whilst unbinding them
On non-LLC platforms, when changing the cache level of an object, we may need to unbind it so that prefetching across page boundaries does not cross into a different memory domain. This requires us to unbind conflicting vma, but we did so iterating over the object's vma in an unsafe manner (as the list was being modified as we iterated).

The regression was introduced in

    commit 3089c6f239d7d2c4cb2dd5c353e8984cf79af1d7
    Author: Ben Widawsky <ben@bwidawsk.net>
    Date:   Wed Jul 31 17:00:03 2013 -0700

        drm/i915: make caching operate on all address spaces

apparently as far back as v3.12-rc1, but it has only just begun to trigger real world bug reports.

Reported-and-tested-by: Nikolay Martynov <mar.kolya@gmail.com>
Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=76384
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Ben Widawsky <ben@bwidawsk.net>
Cc: stable@vger.kernel.org
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
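For reference, the bug is the standard pitfall with the <linux/list.h> iterators: i915_vma_unbind() unlinks the vma from obj->vma_list, so advancing the plain iterator afterwards dereferences an entry that is no longer on the list. Below is a minimal sketch of the pattern (illustrative names only, not the driver's types), showing why list_for_each_entry_safe() is the correct idiom when the loop body may remove the current entry:

	#include <linux/list.h>

	/* Illustrative stand-in for a list member such as the vma. */
	struct item {
		struct list_head link;
	};

	static void drop_conflicting(struct list_head *item_list)
	{
		struct item *it, *next;

		/*
		 * UNSAFE variant: list_for_each_entry(it, item_list, link)
		 * would read it->link.next *after* the entry has been
		 * unlinked below, walking off the list.
		 *
		 * The _safe variant caches the next entry in "next" before
		 * the body runs, so unlinking "it" cannot break iteration.
		 */
		list_for_each_entry_safe(it, next, item_list, link) {
			list_del(&it->link);
			/* ... release resources associated with "it" ... */
		}
	}

Note that list_for_each_entry_safe() only protects against the body removing the *current* entry; it does not make the walk safe against concurrent modification from other contexts, which is why the driver still relies on struct_mutex here.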
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c  4
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index ee32759ffce3..33bbaa0d4412 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -3467,7 +3467,7 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
 				    enum i915_cache_level cache_level)
 {
 	struct drm_device *dev = obj->base.dev;
-	struct i915_vma *vma;
+	struct i915_vma *vma, *next;
 	int ret;
 
 	if (obj->cache_level == cache_level)
@@ -3478,7 +3478,7 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
 		return -EBUSY;
 	}
 
-	list_for_each_entry(vma, &obj->vma_list, vma_link) {
+	list_for_each_entry_safe(vma, next, &obj->vma_list, vma_link) {
 		if (!i915_gem_valid_gtt_space(dev, &vma->node, cache_level)) {
 			ret = i915_vma_unbind(vma);
 			if (ret)