aboutsummaryrefslogtreecommitdiffstats
path: root/drivers
diff options
context:
space:
mode:
authorBen Widawsky <ben@bwidawsk.net>2011-12-12 22:21:57 -0500
committerKeith Packard <keithp@keithp.com>2012-01-03 12:09:44 -0500
commite2971bdab2b761683353da383c0fd5ac704d1cca (patch)
treee85a6591108ae7a5aa0cf0bb2bb6a1bc85b1ae5e /drivers
parent7a7e8734ac3235efafd34819b27fbdf5417e6d60 (diff)
drm/i915: relative_constants_mode race fix
dev_priv keeps track of the current addressing mode that gets set at execbuffer time. Unfortunately the existing code was doing this before acquiring struct_mutex, which leaves a race with another thread also doing an execbuffer. If that wasn't bad enough, relocate_slow drops struct_mutex, which opens a much more likely error where another thread comes in and modifies the state while relocate_slow is being slow.

The solution here is to just defer setting this state until we absolutely need it, and we know we'll have struct_mutex for the remainder of our code path.

v2: Keith noticed a bug in the original patch.

Signed-off-by: Ben Widawsky <ben@bwidawsk.net>
Reviewed-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Signed-off-by: Keith Packard <keithp@keithp.com>
Diffstat (limited to 'drivers')
-rw-r--r--drivers/gpu/drm/i915/i915_gem_execbuffer.c29
1 file changed, 16 insertions(+), 13 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index c681dc149d2a..68e5b41dc7f2 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -1033,19 +1033,6 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
1033 if (INTEL_INFO(dev)->gen > 5 && 1033 if (INTEL_INFO(dev)->gen > 5 &&
1034 mode == I915_EXEC_CONSTANTS_REL_SURFACE) 1034 mode == I915_EXEC_CONSTANTS_REL_SURFACE)
1035 return -EINVAL; 1035 return -EINVAL;
1036
1037 ret = intel_ring_begin(ring, 4);
1038 if (ret)
1039 return ret;
1040
1041 intel_ring_emit(ring, MI_NOOP);
1042 intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
1043 intel_ring_emit(ring, INSTPM);
1044 intel_ring_emit(ring,
1045 I915_EXEC_CONSTANTS_MASK << 16 | mode);
1046 intel_ring_advance(ring);
1047
1048 dev_priv->relative_constants_mode = mode;
1049 } 1036 }
1050 break; 1037 break;
1051 default: 1038 default:
@@ -1176,6 +1163,22 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
1176 } 1163 }
1177 } 1164 }
1178 1165
1166 if (ring == &dev_priv->ring[RCS] &&
1167 mode != dev_priv->relative_constants_mode) {
1168 ret = intel_ring_begin(ring, 4);
1169 if (ret)
1170 goto err;
1171
1172 intel_ring_emit(ring, MI_NOOP);
1173 intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
1174 intel_ring_emit(ring, INSTPM);
1175 intel_ring_emit(ring,
1176 I915_EXEC_CONSTANTS_MASK << 16 | mode);
1177 intel_ring_advance(ring);
1178
1179 dev_priv->relative_constants_mode = mode;
1180 }
1181
1179 trace_i915_gem_ring_dispatch(ring, seqno); 1182 trace_i915_gem_ring_dispatch(ring, seqno);
1180 1183
1181 exec_start = batch_obj->gtt_offset + args->batch_start_offset; 1184 exec_start = batch_obj->gtt_offset + args->batch_start_offset;