author	Chris Wilson <chris@chris-wilson.co.uk>	2017-11-23 10:26:31 -0500
committer	Chris Wilson <chris@chris-wilson.co.uk>	2017-11-23 11:12:06 -0500
commit	8911a31c813275882fdc15554235a914e678759e (patch)
tree	dd394daf60fa8460f5dac52f8d5a6fd25ab0bff7 /drivers/gpu/drm/i915/intel_ringbuffer.c
parent	b1c24a6137af11fca49192a42face03cacbd7fc5 (diff)
drm/i915: Move mi_set_context() into the legacy ringbuffer submission
The legacy i915_switch_context() is only applicable to the legacy
ringbuffer submission method, so move it from the general
i915_gem_context.c to intel_ringbuffer.c (rename pending!).

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Mika Kuoppala <mika.kuoppala@linux.intel.com>
Reviewed-by: Mika Kuoppala <mika.kuoppala@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20171123152631.31385-2-chris@chris-wilson.co.uk
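At a glance, the patch changes which helper ring_request_alloc() calls for the context switch. The toy program below models just that call-path change; every type and body in it is an invented stand-in for illustration, not the driver's real definitions (a minimal sketch, not the kernel code):

/*
 * Toy model of the call-path change: the context-switch helper becomes
 * file-local to the legacy ringbuffer code. Only the shape of the call
 * mirrors the patch; all names and bodies here are stand-ins.
 */
#include <stdio.h>

struct drm_i915_gem_request { int id; };

/* After this patch: static within intel_ringbuffer.c, replacing the
 * exported i915_switch_context() from i915_gem_context.c. */
static int switch_context(struct drm_i915_gem_request *rq)
{
	printf("legacy submission: context switch for request %d\n", rq->id);
	return 0;
}

static int ring_request_alloc(struct drm_i915_gem_request *rq)
{
	/* was: ret = i915_switch_context(rq); */
	return switch_context(rq);
}

int main(void)
{
	struct drm_i915_gem_request rq = { .id = 1 };
	return ring_request_alloc(&rq);
}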
Diffstat (limited to 'drivers/gpu/drm/i915/intel_ringbuffer.c')
-rw-r--r--	drivers/gpu/drm/i915/intel_ringbuffer.c | 186
1 file changed, 185 insertions(+), 1 deletion(-)
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index a904b0353bec..e2085820b586 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -1385,6 +1385,190 @@ void intel_legacy_submission_resume(struct drm_i915_private *dev_priv)
 		intel_ring_reset(engine->buffer, 0);
 }
 
+static inline int mi_set_context(struct drm_i915_gem_request *rq, u32 flags)
+{
+	struct drm_i915_private *i915 = rq->i915;
+	struct intel_engine_cs *engine = rq->engine;
+	enum intel_engine_id id;
+	const int num_rings =
+		/* Use an extended w/a on gen7 if signalling from other rings */
+		(HAS_LEGACY_SEMAPHORES(i915) && IS_GEN7(i915)) ?
+		INTEL_INFO(i915)->num_rings - 1 :
+		0;
+	int len;
+	u32 *cs;
+
+	flags |= MI_MM_SPACE_GTT;
+	if (IS_HASWELL(i915))
+		/* These flags are for resource streamer on HSW+ */
+		flags |= HSW_MI_RS_SAVE_STATE_EN | HSW_MI_RS_RESTORE_STATE_EN;
+	else
+		flags |= MI_SAVE_EXT_STATE_EN | MI_RESTORE_EXT_STATE_EN;
+
+	len = 4;
+	if (IS_GEN7(i915))
+		len += 2 + (num_rings ? 4*num_rings + 6 : 0);
+
+	cs = intel_ring_begin(rq, len);
+	if (IS_ERR(cs))
+		return PTR_ERR(cs);
+
+	/* WaProgramMiArbOnOffAroundMiSetContext:ivb,vlv,hsw,bdw,chv */
+	if (IS_GEN7(i915)) {
+		*cs++ = MI_ARB_ON_OFF | MI_ARB_DISABLE;
+		if (num_rings) {
+			struct intel_engine_cs *signaller;
+
+			*cs++ = MI_LOAD_REGISTER_IMM(num_rings);
+			for_each_engine(signaller, i915, id) {
+				if (signaller == engine)
+					continue;
+
+				*cs++ = i915_mmio_reg_offset(
+					   RING_PSMI_CTL(signaller->mmio_base));
+				*cs++ = _MASKED_BIT_ENABLE(
+						GEN6_PSMI_SLEEP_MSG_DISABLE);
+			}
+		}
+	}
+
+	*cs++ = MI_NOOP;
+	*cs++ = MI_SET_CONTEXT;
+	*cs++ = i915_ggtt_offset(rq->ctx->engine[RCS].state) | flags;
+	/*
+	 * w/a: MI_SET_CONTEXT must always be followed by MI_NOOP
+	 * WaMiSetContext_Hang:snb,ivb,vlv
+	 */
+	*cs++ = MI_NOOP;
+
+	if (IS_GEN7(i915)) {
+		if (num_rings) {
+			struct intel_engine_cs *signaller;
+			i915_reg_t last_reg = {}; /* keep gcc quiet */
+
+			*cs++ = MI_LOAD_REGISTER_IMM(num_rings);
+			for_each_engine(signaller, i915, id) {
+				if (signaller == engine)
+					continue;
+
+				last_reg = RING_PSMI_CTL(signaller->mmio_base);
+				*cs++ = i915_mmio_reg_offset(last_reg);
+				*cs++ = _MASKED_BIT_DISABLE(
+						GEN6_PSMI_SLEEP_MSG_DISABLE);
+			}
+
+			/* Insert a delay before the next switch! */
+			*cs++ = MI_STORE_REGISTER_MEM | MI_SRM_LRM_GLOBAL_GTT;
+			*cs++ = i915_mmio_reg_offset(last_reg);
+			*cs++ = i915_ggtt_offset(engine->scratch);
+			*cs++ = MI_NOOP;
+		}
+		*cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
+	}
+
+	intel_ring_advance(rq, cs);
+
+	return 0;
+}
+
+static int remap_l3(struct drm_i915_gem_request *rq, int slice)
+{
+	u32 *cs, *remap_info = rq->i915->l3_parity.remap_info[slice];
+	int i;
+
+	if (!remap_info)
+		return 0;
+
+	cs = intel_ring_begin(rq, GEN7_L3LOG_SIZE/4 * 2 + 2);
+	if (IS_ERR(cs))
+		return PTR_ERR(cs);
+
+	/*
+	 * Note: We do not worry about the concurrent register cacheline hang
+	 * here because no other code should access these registers other than
+	 * at initialization time.
+	 */
+	*cs++ = MI_LOAD_REGISTER_IMM(GEN7_L3LOG_SIZE/4);
+	for (i = 0; i < GEN7_L3LOG_SIZE/4; i++) {
+		*cs++ = i915_mmio_reg_offset(GEN7_L3LOG(slice, i));
+		*cs++ = remap_info[i];
+	}
+	*cs++ = MI_NOOP;
+	intel_ring_advance(rq, cs);
+
+	return 0;
+}
+
+static int switch_context(struct drm_i915_gem_request *rq)
+{
+	struct intel_engine_cs *engine = rq->engine;
+	struct i915_gem_context *to_ctx = rq->ctx;
+	struct i915_hw_ppgtt *to_mm =
+		to_ctx->ppgtt ?: rq->i915->mm.aliasing_ppgtt;
+	struct i915_gem_context *from_ctx = engine->legacy_active_context;
+	struct i915_hw_ppgtt *from_mm = engine->legacy_active_ppgtt;
+	u32 hw_flags = 0;
+	int ret, i;
+
+	lockdep_assert_held(&rq->i915->drm.struct_mutex);
+	GEM_BUG_ON(HAS_EXECLISTS(rq->i915));
+
+	if (to_mm != from_mm ||
+	    (to_mm && intel_engine_flag(engine) & to_mm->pd_dirty_rings)) {
+		trace_switch_mm(engine, to_ctx);
+		ret = to_mm->switch_mm(to_mm, rq);
+		if (ret)
+			goto err;
+
+		to_mm->pd_dirty_rings &= ~intel_engine_flag(engine);
+		engine->legacy_active_ppgtt = to_mm;
+		hw_flags = MI_FORCE_RESTORE;
+	}
+
+	if (to_ctx->engine[engine->id].state &&
+	    (to_ctx != from_ctx || hw_flags & MI_FORCE_RESTORE)) {
+		GEM_BUG_ON(engine->id != RCS);
+
+		/*
+		 * The kernel context(s) is treated as pure scratch and is not
+		 * expected to retain any state (as we sacrifice it during
+		 * suspend and on resume it may be corrupted). This is ok,
+		 * as nothing actually executes using the kernel context; it
+		 * is purely used for flushing user contexts.
+		 */
+		if (i915_gem_context_is_kernel(to_ctx))
+			hw_flags = MI_RESTORE_INHIBIT;
+
+		ret = mi_set_context(rq, hw_flags);
+		if (ret)
+			goto err_mm;
+
+		engine->legacy_active_context = to_ctx;
+	}
+
+	if (to_ctx->remap_slice) {
+		for (i = 0; i < MAX_L3_SLICES; i++) {
+			if (!(to_ctx->remap_slice & BIT(i)))
+				continue;
+
+			ret = remap_l3(rq, i);
+			if (ret)
+				goto err_ctx;
+		}
+
+		to_ctx->remap_slice = 0;
+	}
+
+	return 0;
+
+err_ctx:
+	engine->legacy_active_context = from_ctx;
+err_mm:
+	engine->legacy_active_ppgtt = from_mm;
+err:
+	return ret;
+}
+
 static int ring_request_alloc(struct drm_i915_gem_request *request)
 {
 	int ret;
@@ -1401,7 +1585,7 @@ static int ring_request_alloc(struct drm_i915_gem_request *request)
 	if (ret)
 		return ret;
 
-	ret = i915_switch_context(request);
+	ret = switch_context(request);
 	if (ret)
 		return ret;
 
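The hunk above is the heart of the patch. To summarise the ordering that switch_context() enforces: switch the ppgtt first and force a full context restore if the page directories changed; inhibit the restore entirely when moving onto a kernel context, since it is treated as pure scratch; then replay any pending L3 remaps slice by slice. The standalone program below models that control flow. Every type, constant and helper in it is a simplified stand-in invented for illustration; only the ordering mirrors the diff.

/*
 * Minimal model of the switch_context() ordering above. All names here
 * are stand-ins, not the driver's real definitions.
 */
#include <stdbool.h>
#include <stdio.h>

#define MI_FORCE_RESTORE   (1u << 0)
#define MI_RESTORE_INHIBIT (1u << 1)
#define MAX_L3_SLICES      2

struct context {
	bool is_kernel;           /* kernel contexts are pure scratch */
	unsigned int remap_slice; /* bitmask of L3 slices needing remap */
	int mm_id;                /* stand-in for the ppgtt */
};

static int mi_set_context(struct context *ctx, unsigned int flags)
{
	printf("MI_SET_CONTEXT ctx=%p flags=%#x\n", (void *)ctx, flags);
	return 0;
}

static int remap_l3(int slice)
{
	printf("remap L3 slice %d\n", slice);
	return 0;
}

static int switch_context(struct context *to, struct context *from)
{
	unsigned int hw_flags = 0;
	int i;

	/* 1. Switch the ppgtt first; a new mm forces a full restore. */
	if (!from || to->mm_id != from->mm_id) {
		printf("switch_mm -> %d\n", to->mm_id);
		hw_flags = MI_FORCE_RESTORE;
	}

	/* 2. Emit MI_SET_CONTEXT if the context (or its mm) changed. */
	if (to != from || (hw_flags & MI_FORCE_RESTORE)) {
		/* Kernel contexts carry no state worth restoring. */
		if (to->is_kernel)
			hw_flags = MI_RESTORE_INHIBIT;
		if (mi_set_context(to, hw_flags))
			return -1;
	}

	/* 3. Replay pending L3 remaps, one slice at a time. */
	for (i = 0; i < MAX_L3_SLICES; i++) {
		if (!(to->remap_slice & (1u << i)))
			continue;
		if (remap_l3(i))
			return -1;
	}
	to->remap_slice = 0;

	return 0;
}

int main(void)
{
	struct context kernel = { .is_kernel = true, .mm_id = 0 };
	struct context user = { .remap_slice = 0x1, .mm_id = 1 };

	switch_context(&user, &kernel); /* forces restore, remaps slice 0 */
	switch_context(&kernel, &user); /* restore inhibited */
	return 0;
}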