author		Oscar Mateo <oscar.mateo@intel.com>	2014-07-03 11:28:05 -0400
committer	Daniel Vetter <daniel.vetter@ffwll.ch>	2014-07-08 06:31:06 -0400
commit		78382593e921c88371abd019aca8978db3248a8f (patch)
tree		b8f8460a5519359f11fd46bacaf7838543c73dac /drivers/gpu/drm/i915
parent		1b5d063fafeb01569ca988b1d4f45f64c67d2e5d (diff)
drm/i915: Extract the actual workload submission mechanism from execbuffer
So that we isolate the legacy ringbuffer submission mechanism, which becomes a good candidate to be abstracted away. This is prep-work for Execlists (which will have its own workload submission mechanism). No functional changes.

Reviewed-by: Jesse Barnes <jbarnes@virtuousgeek.org>
Signed-off-by: Oscar Mateo <oscar.mateo@intel.com>
Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
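To make the "abstracted away" goal concrete: once the legacy path lives in a function with a self-contained signature, the submission mechanism can be selected through a function pointer instead of being open-coded in i915_gem_do_execbuffer(). The C sketch below only illustrates that shape; the typedef, the hooks struct, and execlists_do_execbuf() are hypothetical names for illustration (nothing in this patch introduces them), it assumes the usual in-tree i915 types, and it would have to sit in i915_gem_execbuffer.c next to the static legacy_ringbuffer_submission() added here.

typedef int (*i915_do_execbuf_t)(struct drm_device *dev, struct drm_file *file,
				 struct intel_engine_cs *ring,
				 struct intel_context *ctx,
				 struct drm_i915_gem_execbuffer2 *args,
				 struct list_head *vmas,
				 struct drm_i915_gem_object *batch_obj,
				 u64 exec_start, u32 flags);

/* Hypothetical container for the submission hook; the signature mirrors
 * legacy_ringbuffer_submission() as extracted by this patch. */
struct i915_submission_hooks {
	i915_do_execbuf_t do_execbuf;
};

/* Stand-in prototype for a future Execlists backend (hypothetical). */
int execlists_do_execbuf(struct drm_device *dev, struct drm_file *file,
			 struct intel_engine_cs *ring, struct intel_context *ctx,
			 struct drm_i915_gem_execbuffer2 *args, struct list_head *vmas,
			 struct drm_i915_gem_object *batch_obj,
			 u64 exec_start, u32 flags);

/* Hypothetical init-time selection between the two backends; with this in
 * place, i915_gem_do_execbuffer() ends in a single hooks->do_execbuf() call. */
static inline void i915_select_submission(struct i915_submission_hooks *hooks,
					  bool enable_execlists)
{
	hooks->do_execbuf = enable_execlists ? execlists_do_execbuf
					     : legacy_ringbuffer_submission;
}

Whatever mechanism the Execlists series eventually uses may differ in detail; the point is only that the single call made at the bottom of i915_gem_do_execbuffer() in this diff already has the shape such a hook would need.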
Diffstat (limited to 'drivers/gpu/drm/i915')
-rw-r--r--	drivers/gpu/drm/i915/i915_gem_execbuffer.c	298
1 file changed, 162 insertions(+), 136 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index c97178ebf2b5..60998fc4e5b2 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -1026,6 +1026,163 @@ i915_reset_gen7_sol_offsets(struct drm_device *dev,
 	return 0;
 }
 
+static int
+legacy_ringbuffer_submission(struct drm_device *dev, struct drm_file *file,
+			     struct intel_engine_cs *ring,
+			     struct intel_context *ctx,
+			     struct drm_i915_gem_execbuffer2 *args,
+			     struct list_head *vmas,
+			     struct drm_i915_gem_object *batch_obj,
+			     u64 exec_start, u32 flags)
+{
+	struct drm_clip_rect *cliprects = NULL;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	u64 exec_len;
+	int instp_mode;
+	u32 instp_mask;
+	int i, ret = 0;
+
+	if (args->num_cliprects != 0) {
+		if (ring != &dev_priv->ring[RCS]) {
+			DRM_DEBUG("clip rectangles are only valid with the render ring\n");
+			return -EINVAL;
+		}
+
+		if (INTEL_INFO(dev)->gen >= 5) {
+			DRM_DEBUG("clip rectangles are only valid on pre-gen5\n");
+			return -EINVAL;
+		}
+
+		if (args->num_cliprects > UINT_MAX / sizeof(*cliprects)) {
+			DRM_DEBUG("execbuf with %u cliprects\n",
+				  args->num_cliprects);
+			return -EINVAL;
+		}
+
+		cliprects = kcalloc(args->num_cliprects,
+				    sizeof(*cliprects),
+				    GFP_KERNEL);
+		if (cliprects == NULL) {
+			ret = -ENOMEM;
+			goto error;
+		}
+
+		if (copy_from_user(cliprects,
+				   to_user_ptr(args->cliprects_ptr),
+				   sizeof(*cliprects)*args->num_cliprects)) {
+			ret = -EFAULT;
+			goto error;
+		}
+	} else {
+		if (args->DR4 == 0xffffffff) {
+			DRM_DEBUG("UXA submitting garbage DR4, fixing up\n");
+			args->DR4 = 0;
+		}
+
+		if (args->DR1 || args->DR4 || args->cliprects_ptr) {
+			DRM_DEBUG("0 cliprects but dirt in cliprects fields\n");
+			return -EINVAL;
+		}
+	}
+
+	ret = i915_gem_execbuffer_move_to_gpu(ring, vmas);
+	if (ret)
+		goto error;
+
+	ret = i915_switch_context(ring, ctx);
+	if (ret)
+		goto error;
+
+	instp_mode = args->flags & I915_EXEC_CONSTANTS_MASK;
+	instp_mask = I915_EXEC_CONSTANTS_MASK;
+	switch (instp_mode) {
+	case I915_EXEC_CONSTANTS_REL_GENERAL:
+	case I915_EXEC_CONSTANTS_ABSOLUTE:
+	case I915_EXEC_CONSTANTS_REL_SURFACE:
+		if (instp_mode != 0 && ring != &dev_priv->ring[RCS]) {
+			DRM_DEBUG("non-0 rel constants mode on non-RCS\n");
+			ret = -EINVAL;
+			goto error;
+		}
+
+		if (instp_mode != dev_priv->relative_constants_mode) {
+			if (INTEL_INFO(dev)->gen < 4) {
+				DRM_DEBUG("no rel constants on pre-gen4\n");
+				ret = -EINVAL;
+				goto error;
+			}
+
+			if (INTEL_INFO(dev)->gen > 5 &&
+			    instp_mode == I915_EXEC_CONSTANTS_REL_SURFACE) {
+				DRM_DEBUG("rel surface constants mode invalid on gen5+\n");
+				ret = -EINVAL;
+				goto error;
+			}
+
+			/* The HW changed the meaning on this bit on gen6 */
+			if (INTEL_INFO(dev)->gen >= 6)
+				instp_mask &= ~I915_EXEC_CONSTANTS_REL_SURFACE;
+		}
+		break;
+	default:
+		DRM_DEBUG("execbuf with unknown constants: %d\n", instp_mode);
+		ret = -EINVAL;
+		goto error;
+	}
+
+	if (ring == &dev_priv->ring[RCS] &&
+	    instp_mode != dev_priv->relative_constants_mode) {
+		ret = intel_ring_begin(ring, 4);
+		if (ret)
+			goto error;
+
+		intel_ring_emit(ring, MI_NOOP);
+		intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
+		intel_ring_emit(ring, INSTPM);
+		intel_ring_emit(ring, instp_mask << 16 | instp_mode);
+		intel_ring_advance(ring);
+
+		dev_priv->relative_constants_mode = instp_mode;
+	}
+
+	if (args->flags & I915_EXEC_GEN7_SOL_RESET) {
+		ret = i915_reset_gen7_sol_offsets(dev, ring);
+		if (ret)
+			goto error;
+	}
+
+	exec_len = args->batch_len;
+	if (cliprects) {
+		for (i = 0; i < args->num_cliprects; i++) {
+			ret = i915_emit_box(dev, &cliprects[i],
+					    args->DR1, args->DR4);
+			if (ret)
+				goto error;
+
+			ret = ring->dispatch_execbuffer(ring,
+							exec_start, exec_len,
+							flags);
+			if (ret)
+				goto error;
+		}
+	} else {
+		ret = ring->dispatch_execbuffer(ring,
+						exec_start, exec_len,
+						flags);
+		if (ret)
+			return ret;
+	}
+
+	trace_i915_gem_ring_dispatch(ring, intel_ring_get_seqno(ring), flags);
+
+	i915_gem_execbuffer_move_to_active(vmas, ring);
+	i915_gem_execbuffer_retire_commands(dev, file, ring, batch_obj);
+
+error:
+	kfree(cliprects);
+	return ret;
+}
+
 /**
  * Find one BSD ring to dispatch the corresponding BSD command.
  * The Ring ID is returned.
@@ -1085,14 +1242,13 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct eb_vmas *eb;
 	struct drm_i915_gem_object *batch_obj;
-	struct drm_clip_rect *cliprects = NULL;
 	struct intel_engine_cs *ring;
 	struct intel_context *ctx;
 	struct i915_address_space *vm;
 	const u32 ctx_id = i915_execbuffer2_get_context_id(*args);
-	u64 exec_start = args->batch_start_offset, exec_len;
-	u32 mask, flags;
-	int ret, mode, i;
+	u64 exec_start = args->batch_start_offset;
+	u32 flags;
+	int ret;
 	bool need_relocs;
 
 	if (!i915_gem_check_execbuffer(args))
@@ -1136,87 +1292,11 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 		return -EINVAL;
 	}
 
-	mode = args->flags & I915_EXEC_CONSTANTS_MASK;
-	mask = I915_EXEC_CONSTANTS_MASK;
-	switch (mode) {
-	case I915_EXEC_CONSTANTS_REL_GENERAL:
-	case I915_EXEC_CONSTANTS_ABSOLUTE:
-	case I915_EXEC_CONSTANTS_REL_SURFACE:
-		if (mode != 0 && ring != &dev_priv->ring[RCS]) {
-			DRM_DEBUG("non-0 rel constants mode on non-RCS\n");
-			return -EINVAL;
-		}
-
-		if (mode != dev_priv->relative_constants_mode) {
-			if (INTEL_INFO(dev)->gen < 4) {
-				DRM_DEBUG("no rel constants on pre-gen4\n");
-				return -EINVAL;
-			}
-
-			if (INTEL_INFO(dev)->gen > 5 &&
-			    mode == I915_EXEC_CONSTANTS_REL_SURFACE) {
-				DRM_DEBUG("rel surface constants mode invalid on gen5+\n");
-				return -EINVAL;
-			}
-
-			/* The HW changed the meaning on this bit on gen6 */
-			if (INTEL_INFO(dev)->gen >= 6)
-				mask &= ~I915_EXEC_CONSTANTS_REL_SURFACE;
-		}
-		break;
-	default:
-		DRM_DEBUG("execbuf with unknown constants: %d\n", mode);
-		return -EINVAL;
-	}
-
 	if (args->buffer_count < 1) {
 		DRM_DEBUG("execbuf with %d buffers\n", args->buffer_count);
 		return -EINVAL;
 	}
 
-	if (args->num_cliprects != 0) {
-		if (ring != &dev_priv->ring[RCS]) {
-			DRM_DEBUG("clip rectangles are only valid with the render ring\n");
-			return -EINVAL;
-		}
-
-		if (INTEL_INFO(dev)->gen >= 5) {
-			DRM_DEBUG("clip rectangles are only valid on pre-gen5\n");
-			return -EINVAL;
-		}
-
-		if (args->num_cliprects > UINT_MAX / sizeof(*cliprects)) {
-			DRM_DEBUG("execbuf with %u cliprects\n",
-				  args->num_cliprects);
-			return -EINVAL;
-		}
-
-		cliprects = kcalloc(args->num_cliprects,
-				    sizeof(*cliprects),
-				    GFP_KERNEL);
-		if (cliprects == NULL) {
-			ret = -ENOMEM;
-			goto pre_mutex_err;
-		}
-
-		if (copy_from_user(cliprects,
-				   to_user_ptr(args->cliprects_ptr),
-				   sizeof(*cliprects)*args->num_cliprects)) {
-			ret = -EFAULT;
-			goto pre_mutex_err;
-		}
-	} else {
-		if (args->DR4 == 0xffffffff) {
-			DRM_DEBUG("UXA submitting garbage DR4, fixing up\n");
-			args->DR4 = 0;
-		}
-
-		if (args->DR1 || args->DR4 || args->cliprects_ptr) {
-			DRM_DEBUG("0 cliprects but dirt in cliprects fields\n");
-			return -EINVAL;
-		}
-	}
-
 	intel_runtime_pm_get(dev_priv);
 
 	ret = i915_mutex_lock_interruptible(dev);
@@ -1320,63 +1400,11 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 	else
 		exec_start += i915_gem_obj_offset(batch_obj, vm);
 
-	ret = i915_gem_execbuffer_move_to_gpu(ring, &eb->vmas);
+	ret = legacy_ringbuffer_submission(dev, file, ring, ctx,
+			args, &eb->vmas, batch_obj, exec_start, flags);
 	if (ret)
 		goto err;
 
-	ret = i915_switch_context(ring, ctx);
-	if (ret)
-		goto err;
-
-	if (ring == &dev_priv->ring[RCS] &&
-	    mode != dev_priv->relative_constants_mode) {
-		ret = intel_ring_begin(ring, 4);
-		if (ret)
-			goto err;
-
-		intel_ring_emit(ring, MI_NOOP);
-		intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
-		intel_ring_emit(ring, INSTPM);
-		intel_ring_emit(ring, mask << 16 | mode);
-		intel_ring_advance(ring);
-
-		dev_priv->relative_constants_mode = mode;
-	}
-
-	if (args->flags & I915_EXEC_GEN7_SOL_RESET) {
-		ret = i915_reset_gen7_sol_offsets(dev, ring);
-		if (ret)
-			goto err;
-	}
-
-
-	exec_len = args->batch_len;
-	if (cliprects) {
-		for (i = 0; i < args->num_cliprects; i++) {
-			ret = i915_emit_box(dev, &cliprects[i],
-					    args->DR1, args->DR4);
-			if (ret)
-				goto err;
-
-			ret = ring->dispatch_execbuffer(ring,
-							exec_start, exec_len,
-							flags);
-			if (ret)
-				goto err;
-		}
-	} else {
-		ret = ring->dispatch_execbuffer(ring,
-						exec_start, exec_len,
-						flags);
-		if (ret)
-			goto err;
-	}
-
-	trace_i915_gem_ring_dispatch(ring, intel_ring_get_seqno(ring), flags);
-
-	i915_gem_execbuffer_move_to_active(&eb->vmas, ring);
-	i915_gem_execbuffer_retire_commands(dev, file, ring, batch_obj);
-
 err:
 	/* the request owns the ref now */
 	i915_gem_context_unreference(ctx);
@@ -1385,8 +1413,6 @@ err:
 	mutex_unlock(&dev->struct_mutex);
 
 pre_mutex_err:
-	kfree(cliprects);
-
 	/* intel_gpu_busy should also get a ref, so it will free when the device
 	 * is really idle. */
 	intel_runtime_pm_put(dev_priv);