author    Jon Bloomfield <jon.bloomfield@intel.com>  2018-09-20 12:58:36 -0400
committer Jon Bloomfield <jon.bloomfield@intel.com>  2019-11-05 14:38:34 -0500
commit    f8c08d8faee5567803c8c533865296ca30286bbf (patch)
tree      be750332b60d751184606ca6c4c0d7be6dddce52
parent    0546a29cd884fb8184731c79ab008927ca8859d0 (diff)
drm/i915/cmdparser: Add support for backward jumps
To keep things manageable, the pre-gen9 cmdparser does not
attempt to track any form of nested BB_START's. This did not
prevent usermode from using nested starts, or even chained
batches because the cmdparser is not strictly enforced pre gen9.

Instead, the existence of a nested BB_START would cause the batch
to be emitted in insecure mode, and any privileged capabilities
would not be available.

For Gen9, the cmdparser becomes mandatory (for BCS at least), and
so not providing any form of nested BB_START support becomes
overly restrictive. Any such batch will simply not run.

We make heavy use of backward jumps in igt, and it is much easier
to add support for this restricted subset of nested jumps than to
rewrite the whole of our test suite to avoid them.

Add the required logic to support limited backward jumps, to
instructions that have already been validated by the parser.

Note that it's not sufficient to simply approve any BB_START
that jumps backwards in the buffer because this would allow an
attacker to embed a rogue instruction sequence within the
operand words of a harmless instruction (say LRI) and jump to
that.

We introduce a bit array to track every instr offset successfully
validated, and test the target of BB_START against this. If the
target offset hits, it is re-written to the same offset in the
shadow buffer and the BB_START cmd is allowed.

Note: This patch deliberately ignores checkpatch issues in the
cmdtables, in order to match the style of the surrounding code. We'll
correct the entire file in one go in a later patch.

v2: set dispatch secure late (Mika)
v3: rebase (Mika)
v4: Clear whitelist on each parse
    Minor review updates (Chris)
v5: Correct backward jump batching
v6: fix compilation error due to struct eb shuffle (Mika)

Cc: Tony Luck <tony.luck@intel.com>
Cc: Dave Airlie <airlied@redhat.com>
Cc: Takashi Iwai <tiwai@suse.de>
Cc: Tyler Hicks <tyhicks@canonical.com>
Signed-off-by: Jon Bloomfield <jon.bloomfield@intel.com>
Signed-off-by: Mika Kuoppala <mika.kuoppala@linux.intel.com>
Reviewed-by: Chris Wilson <chris.p.wilson@intel.com>
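The scheme the message describes is compact enough to illustrate outside the
driver. Below is a minimal, self-contained sketch in plain C of the idea: a
bit array marks every instruction offset the forward parse has validated, a
single unsigned compare bounds-checks the jump, and an approved target is
rebased onto the shadow copy so execution never re-enters the user-writable
batch. This is illustrative only, not the driver code; check_jump,
whitelist_set and whitelist_test are hypothetical names, and the real logic
lives in check_bbstart() in the patch below.

#include <stdint.h>

#define BITS_PER_LONG (8 * sizeof(unsigned long))

/* Mark the command at this u32 slot as validated; the forward parse
 * would call this for each instruction boundary it accepts. */
static void whitelist_set(unsigned long *wl, uint32_t index)
{
	wl[index / BITS_PER_LONG] |= 1UL << (index % BITS_PER_LONG);
}

static int whitelist_test(const unsigned long *wl, uint32_t index)
{
	return (wl[index / BITS_PER_LONG] >> (index % BITS_PER_LONG)) & 1;
}

/*
 * Validate a BB_START target. On success the target is rewritten to
 * the same offset inside the shadow copy of the batch.
 */
static int check_jump(uint64_t *target, uint64_t batch_start,
		      uint32_t batch_len, uint64_t shadow_start,
		      const unsigned long *wl, uint32_t wl_cmds)
{
	uint64_t jump_offset = *target - batch_start;
	uint32_t index;

	/* Underflow wraps far beyond any u32 length, so one compare
	 * catches targets both below and above the batch. */
	if (jump_offset >= batch_len)
		return -1;

	index = (uint32_t)(jump_offset / sizeof(uint32_t));

	/* Only previously validated instruction boundaries are legal;
	 * jumping into the operand words of another command fails. */
	if (index >= wl_cmds || !whitelist_test(wl, index))
		return -1;

	*target = shadow_start + jump_offset;
	return 0;
}

Note how the underflow trick carries over from check_bbstart(): a target
below batch_start wraps to a value far larger than any u32 batch_len, so the
one >= test rejects jumps in both directions.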
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_context.c          5
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_context_types.h    7
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c      32
-rw-r--r--  drivers/gpu/drm/i915/i915_cmd_parser.c             151
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.h                      9
5 files changed, 178 insertions(+), 26 deletions(-)
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context.c b/drivers/gpu/drm/i915/gem/i915_gem_context.c
index 1cdfe05514c3..e41fd94ae5a9 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_context.c
@@ -319,6 +319,8 @@ static void i915_gem_context_free(struct i915_gem_context *ctx)
 	free_engines(rcu_access_pointer(ctx->engines));
 	mutex_destroy(&ctx->engines_mutex);
 
+	kfree(ctx->jump_whitelist);
+
 	if (ctx->timeline)
 		intel_timeline_put(ctx->timeline);
 
@@ -441,6 +443,9 @@ __create_context(struct drm_i915_private *i915)
 	for (i = 0; i < ARRAY_SIZE(ctx->hang_timestamp); i++)
 		ctx->hang_timestamp[i] = jiffies - CONTEXT_FAST_HANG_JIFFIES;
 
+	ctx->jump_whitelist = NULL;
+	ctx->jump_whitelist_cmds = 0;
+
 	return ctx;
 
 err_free:
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context_types.h b/drivers/gpu/drm/i915/gem/i915_gem_context_types.h
index 260d59cc3de8..00537b9d7006 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_context_types.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_context_types.h
@@ -192,6 +192,13 @@ struct i915_gem_context {
 	 * per vm, which may be one per context or shared with the global GTT)
 	 */
 	struct radix_tree_root handles_vma;
+
+	/** jump_whitelist: Bit array for tracking cmds during cmdparsing
+	 * Guarded by struct_mutex
+	 */
+	unsigned long *jump_whitelist;
+	/** jump_whitelist_cmds: No of cmd slots available */
+	u32 jump_whitelist_cmds;
 };
 
 #endif /* __I915_GEM_CONTEXT_TYPES_H__ */
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
index 8237b2935b5f..e635e1e5f4d3 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
@@ -1972,7 +1972,6 @@ shadow_batch_pin(struct i915_execbuffer *eb, struct drm_i915_gem_object *obj)
 	if (CMDPARSER_USES_GGTT(dev_priv)) {
 		flags = PIN_GLOBAL;
 		vm = &dev_priv->ggtt.vm;
-		eb->batch_flags |= I915_DISPATCH_SECURE;
 	} else if (vma->vm->has_read_only) {
 		flags = PIN_USER;
 		vm = vma->vm;
@@ -1989,18 +1988,35 @@ static struct i915_vma *eb_parse(struct i915_execbuffer *eb)
 {
 	struct intel_engine_pool_node *pool;
 	struct i915_vma *vma;
+	u64 batch_start;
+	u64 shadow_batch_start;
 	int err;
 
 	pool = intel_engine_pool_get(&eb->engine->pool, eb->batch_len);
 	if (IS_ERR(pool))
 		return ERR_CAST(pool);
 
-	err = intel_engine_cmd_parser(eb->engine,
+	vma = shadow_batch_pin(eb, pool->obj);
+	if (IS_ERR(vma))
+		goto err;
+
+	batch_start = gen8_canonical_addr(eb->batch->node.start) +
+		      eb->batch_start_offset;
+
+	shadow_batch_start = gen8_canonical_addr(vma->node.start);
+
+	err = intel_engine_cmd_parser(eb->gem_context,
+				      eb->engine,
 				      eb->batch->obj,
-				      pool->obj,
+				      batch_start,
 				      eb->batch_start_offset,
-				      eb->batch_len);
+				      eb->batch_len,
+				      pool->obj,
+				      shadow_batch_start);
+
 	if (err) {
+		i915_vma_unpin(vma);
+
 		/*
 		 * Unsafe GGTT-backed buffers can still be submitted safely
 		 * as non-secure.
@@ -2015,10 +2031,6 @@ static struct i915_vma *eb_parse(struct i915_execbuffer *eb)
 		goto err;
 	}
 
-	vma = shadow_batch_pin(eb, pool->obj);
-	if (IS_ERR(vma))
-		goto err;
-
 	eb->vma[eb->buffer_count] = i915_vma_get(vma);
 	eb->flags[eb->buffer_count] =
 		__EXEC_OBJECT_HAS_PIN | __EXEC_OBJECT_HAS_REF;
@@ -2027,6 +2039,10 @@ static struct i915_vma *eb_parse(struct i915_execbuffer *eb)
 
 	eb->batch_start_offset = 0;
 	eb->batch = vma;
+
+	if (CMDPARSER_USES_GGTT(eb->i915))
+		eb->batch_flags |= I915_DISPATCH_SECURE;
+
 	/* eb->batch_len unchanged */
 
 	vma->private = pool;
diff --git a/drivers/gpu/drm/i915/i915_cmd_parser.c b/drivers/gpu/drm/i915/i915_cmd_parser.c
index dc5bcbc3ba6e..365eea2b95bd 100644
--- a/drivers/gpu/drm/i915/i915_cmd_parser.c
+++ b/drivers/gpu/drm/i915/i915_cmd_parser.c
@@ -483,6 +483,19 @@ static const struct drm_i915_cmd_descriptor gen9_blt_cmds[] = {
 	      .reg = { .offset = 1, .mask = 0x007FFFFC } ),
 	CMD(  MI_LOAD_REGISTER_REG,             SMI,   !F,  0xFF,   W,
 	      .reg = { .offset = 1, .mask = 0x007FFFFC, .step = 1 } ),
+
+	/*
+	 * We allow BB_START but apply further checks. We just sanitize the
+	 * basic fields here.
+	 */
+#define MI_BB_START_OPERAND_MASK   GENMASK(SMI-1, 0)
+#define MI_BB_START_OPERAND_EXPECT (MI_BATCH_PPGTT_HSW | 1)
+	CMD(  MI_BATCH_BUFFER_START_GEN8,       SMI,   !F,  0xFF,   B,
+	      .bits = {{
+			.offset = 0,
+			.mask = MI_BB_START_OPERAND_MASK,
+			.expected = MI_BB_START_OPERAND_EXPECT,
+	      }},                                                      ),
 };
 
 static const struct drm_i915_cmd_descriptor noop_desc =
@@ -1293,15 +1306,113 @@ static bool check_cmd(const struct intel_engine_cs *engine,
 	return true;
 }
 
+static int check_bbstart(const struct i915_gem_context *ctx,
+			 u32 *cmd, u32 offset, u32 length,
+			 u32 batch_len,
+			 u64 batch_start,
+			 u64 shadow_batch_start)
+{
+	u64 jump_offset, jump_target;
+	u32 target_cmd_offset, target_cmd_index;
+
+	/* For igt compatibility on older platforms */
+	if (CMDPARSER_USES_GGTT(ctx->i915)) {
+		DRM_DEBUG("CMD: Rejecting BB_START for ggtt based submission\n");
+		return -EACCES;
+	}
+
+	if (length != 3) {
+		DRM_DEBUG("CMD: Recursive BB_START with bad length(%u)\n",
+			  length);
+		return -EINVAL;
+	}
+
+	jump_target = *(u64*)(cmd+1);
+	jump_offset = jump_target - batch_start;
+
+	/*
+	 * Any underflow of jump_target is guaranteed to be outside the range
+	 * of a u32, so >= test catches both too large and too small
+	 */
+	if (jump_offset >= batch_len) {
+		DRM_DEBUG("CMD: BB_START to 0x%llx jumps out of BB\n",
+			  jump_target);
+		return -EINVAL;
+	}
+
+	/*
+	 * This cannot overflow a u32 because we already checked jump_offset
+	 * is within the BB, and the batch_len is a u32
+	 */
+	target_cmd_offset = lower_32_bits(jump_offset);
+	target_cmd_index = target_cmd_offset / sizeof(u32);
+
+	*(u64*)(cmd + 1) = shadow_batch_start + target_cmd_offset;
+
+	if (target_cmd_index == offset)
+		return 0;
+
+	if (ctx->jump_whitelist_cmds <= target_cmd_index) {
+		DRM_DEBUG("CMD: Rejecting BB_START - truncated whitelist array\n");
+		return -EINVAL;
+	} else if (!test_bit(target_cmd_index, ctx->jump_whitelist)) {
+		DRM_DEBUG("CMD: BB_START to 0x%llx not a previously executed cmd\n",
+			  jump_target);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static void init_whitelist(struct i915_gem_context *ctx, u32 batch_len)
+{
+	const u32 batch_cmds = DIV_ROUND_UP(batch_len, sizeof(u32));
+	const u32 exact_size = BITS_TO_LONGS(batch_cmds);
+	u32 next_size = BITS_TO_LONGS(roundup_pow_of_two(batch_cmds));
+	unsigned long *next_whitelist;
+
+	if (CMDPARSER_USES_GGTT(ctx->i915))
+		return;
+
+	if (batch_cmds <= ctx->jump_whitelist_cmds) {
+		memset(ctx->jump_whitelist, 0, exact_size * sizeof(u32));
+		return;
+	}
+
+again:
+	next_whitelist = kcalloc(next_size, sizeof(long), GFP_KERNEL);
+	if (next_whitelist) {
+		kfree(ctx->jump_whitelist);
+		ctx->jump_whitelist = next_whitelist;
+		ctx->jump_whitelist_cmds =
+			next_size * BITS_PER_BYTE * sizeof(long);
+		return;
+	}
+
+	if (next_size > exact_size) {
+		next_size = exact_size;
+		goto again;
+	}
+
+	DRM_DEBUG("CMD: Failed to extend whitelist. BB_START may be disallowed\n");
+	memset(ctx->jump_whitelist, 0,
+	       BITS_TO_LONGS(ctx->jump_whitelist_cmds) * sizeof(u32));
+
+	return;
+}
+
 #define LENGTH_BIAS 2
 
 /**
  * i915_parse_cmds() - parse a submitted batch buffer for privilege violations
+ * @ctx: the context in which the batch is to execute
  * @engine: the engine on which the batch is to execute
  * @batch_obj: the batch buffer in question
- * @shadow_batch_obj: copy of the batch buffer in question
+ * @batch_start: Canonical base address of batch
  * @batch_start_offset: byte offset in the batch at which execution starts
  * @batch_len: length of the commands in batch_obj
+ * @shadow_batch_obj: copy of the batch buffer in question
+ * @shadow_batch_start: Canonical base address of shadow_batch_obj
  *
  * Parses the specified batch buffer looking for privilege violations as
  * described in the overview.
@@ -1309,13 +1420,17 @@ static bool check_cmd(const struct intel_engine_cs *engine,
  * Return: non-zero if the parser finds violations or otherwise fails; -EACCES
  * if the batch appears legal but should use hardware parsing
  */
-int intel_engine_cmd_parser(struct intel_engine_cs *engine,
+
+int intel_engine_cmd_parser(struct i915_gem_context *ctx,
+			    struct intel_engine_cs *engine,
 			    struct drm_i915_gem_object *batch_obj,
-			    struct drm_i915_gem_object *shadow_batch_obj,
+			    u64 batch_start,
 			    u32 batch_start_offset,
-			    u32 batch_len)
+			    u32 batch_len,
+			    struct drm_i915_gem_object *shadow_batch_obj,
+			    u64 shadow_batch_start)
 {
-	u32 *cmd, *batch_end;
+	u32 *cmd, *batch_end, offset = 0;
 	struct drm_i915_cmd_descriptor default_desc = noop_desc;
 	const struct drm_i915_cmd_descriptor *desc = &default_desc;
 	bool needs_clflush_after = false;
@@ -1329,6 +1444,8 @@ int intel_engine_cmd_parser(struct intel_engine_cs *engine,
 		return PTR_ERR(cmd);
 	}
 
+	init_whitelist(ctx, batch_len);
+
 	/*
 	 * We use the batch length as size because the shadow object is as
 	 * large or larger and copy_batch() will write MI_NOPs to the extra
@@ -1349,16 +1466,6 @@ int intel_engine_cmd_parser(struct intel_engine_cs *engine,
 		goto err;
 	}
 
-	/*
-	 * We don't try to handle BATCH_BUFFER_START because it adds
-	 * non-trivial complexity. Instead we abort the scan and return
-	 * and error to indicate that the batch is unsafe.
-	 */
-	if (desc->cmd.value == MI_BATCH_BUFFER_START) {
-		ret = -EACCES;
-		goto err;
-	}
-
 	if (desc->flags & CMD_DESC_FIXED)
 		length = desc->length.fixed;
 	else
@@ -1378,7 +1485,21 @@ int intel_engine_cmd_parser(struct intel_engine_cs *engine,
 		goto err;
 	}
 
+	if (desc->cmd.value == MI_BATCH_BUFFER_START) {
+		ret = check_bbstart(ctx, cmd, offset, length,
+				    batch_len, batch_start,
+				    shadow_batch_start);
+
+		if (ret)
+			goto err;
+		break;
+	}
+
+	if (ctx->jump_whitelist_cmds > offset)
+		set_bit(offset, ctx->jump_whitelist);
+
 	cmd += length;
+	offset += length;
 	if (cmd >= batch_end) {
 		DRM_DEBUG_DRIVER("CMD: Got to the end of the buffer w/o a BBE cmd!\n");
 		ret = -EINVAL;
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 5b338e1b79fd..b20424e66097 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -2408,11 +2408,14 @@ const char *i915_cache_level_str(struct drm_i915_private *i915, int type);
 int i915_cmd_parser_get_version(struct drm_i915_private *dev_priv);
 void intel_engine_init_cmd_parser(struct intel_engine_cs *engine);
 void intel_engine_cleanup_cmd_parser(struct intel_engine_cs *engine);
-int intel_engine_cmd_parser(struct intel_engine_cs *engine,
+int intel_engine_cmd_parser(struct i915_gem_context *cxt,
+			    struct intel_engine_cs *engine,
 			    struct drm_i915_gem_object *batch_obj,
-			    struct drm_i915_gem_object *shadow_batch_obj,
+			    u64 user_batch_start,
 			    u32 batch_start_offset,
-			    u32 batch_len);
+			    u32 batch_len,
+			    struct drm_i915_gem_object *shadow_batch_obj,
+			    u64 shadow_batch_start);
 
 /* intel_device_info.c */
 static inline struct intel_device_info *