author		Chris Wilson <chris@chris-wilson.co.uk>	2012-10-17 07:09:54 -0400
committer	Daniel Vetter <daniel.vetter@ffwll.ch>	2012-10-17 15:06:59 -0400
commit		d7d4eeddb8f72342f70621c4b3cb718af9361712 (patch)
tree		e8a11c37fa8dbaf9e93859e91812ff131ca0e20b /drivers/gpu/drm/i915/intel_ringbuffer.c
parent		76e438303403f301f3509479b544e41518edd059 (diff)
drm/i915: Allow DRM_ROOT_ONLY|DRM_MASTER to submit privileged batchbuffers
With the introduction of per-process GTT space, the hardware designers thought
it wise to also limit the ability to write to MMIO space to only a "secure"
batch buffer. The ability to rewrite registers is the only way to program the
hardware to perform certain operations like scanline waits (required for
tear-free windowed updates). So we either have a choice of adding an interface
to perform those synchronized updates inside the kernel, or we permit certain
processes the ability to write to the "safe" registers from within its command
stream. This patch exposes the ability to submit a SECURE batch buffer to
DRM_ROOT_ONLY|DRM_MASTER processes.

v2: Haswell split up bit8 into a ppgtt bit (still bit8) and a security bit
(bit 13, accidentally not set). Also add a comment explaining why secure
batches need a global gtt binding.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk> (v1)
[danvet: added hsw fixup.]
Reviewed-by: Jesse Barnes <jbarnes@virtuousgeek.org>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
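For context, a rough userspace sketch (not part of this file's diff) of how a DRM-master or root process could request privileged dispatch through the execbuffer2 ioctl is shown below. It assumes the I915_EXEC_SECURE uapi flag introduced alongside this change, a GEM batch buffer that has already been created and filled, the kernel uapi include path, and it omits relocations and detailed error handling; the helper name is illustrative only.

	/*
	 * Illustrative sketch only: submit a batch with the secure bit set via
	 * DRM_IOCTL_I915_GEM_EXECBUFFER2.  Only callers that pass the
	 * DRM_ROOT_ONLY|DRM_MASTER check may set I915_EXEC_SECURE; anyone else
	 * gets -EPERM.
	 */
	#include <stdint.h>
	#include <string.h>
	#include <sys/ioctl.h>
	#include <drm/i915_drm.h>

	static int submit_secure_batch(int drm_fd, uint32_t batch_handle, uint32_t batch_len)
	{
		struct drm_i915_gem_exec_object2 obj;
		struct drm_i915_gem_execbuffer2 execbuf;

		memset(&obj, 0, sizeof(obj));
		obj.handle = batch_handle;	/* GEM handle of the batch bo */

		memset(&execbuf, 0, sizeof(execbuf));
		execbuf.buffers_ptr = (uintptr_t)&obj;
		execbuf.buffer_count = 1;
		execbuf.batch_len = batch_len;	/* length of the batch in bytes */
		/* Secure dispatch on the render ring; the kernel translates this
		 * into I915_DISPATCH_SECURE for ring->dispatch_execbuffer(). */
		execbuf.flags = I915_EXEC_RENDER | I915_EXEC_SECURE;

		return ioctl(drm_fd, DRM_IOCTL_I915_GEM_EXECBUFFER2, &execbuf);
	}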
Diffstat (limited to 'drivers/gpu/drm/i915/intel_ringbuffer.c')
-rw-r--r--	drivers/gpu/drm/i915/intel_ringbuffer.c | 48
1 file changed, 39 insertions(+), 9 deletions(-)
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 984a0c5fbf5d..6c6f95a534b1 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -965,7 +965,9 @@ gen6_ring_put_irq(struct intel_ring_buffer *ring)
 }
 
 static int
-i965_dispatch_execbuffer(struct intel_ring_buffer *ring, u32 offset, u32 length)
+i965_dispatch_execbuffer(struct intel_ring_buffer *ring,
+			 u32 offset, u32 length,
+			 unsigned flags)
 {
 	int ret;
 
@@ -976,7 +978,7 @@ i965_dispatch_execbuffer(struct intel_ring_buffer *ring, u32 offset, u32 length)
 	intel_ring_emit(ring,
 			MI_BATCH_BUFFER_START |
 			MI_BATCH_GTT |
-			MI_BATCH_NON_SECURE_I965);
+			(flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE_I965));
 	intel_ring_emit(ring, offset);
 	intel_ring_advance(ring);
 
@@ -985,7 +987,8 @@ i965_dispatch_execbuffer(struct intel_ring_buffer *ring, u32 offset, u32 length)
 
 static int
 i830_dispatch_execbuffer(struct intel_ring_buffer *ring,
-			 u32 offset, u32 len)
+			 u32 offset, u32 len,
+			 unsigned flags)
 {
 	int ret;
 
@@ -994,7 +997,7 @@ i830_dispatch_execbuffer(struct intel_ring_buffer *ring,
 		return ret;
 
 	intel_ring_emit(ring, MI_BATCH_BUFFER);
-	intel_ring_emit(ring, offset | MI_BATCH_NON_SECURE);
+	intel_ring_emit(ring, offset | (flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE));
 	intel_ring_emit(ring, offset + len - 8);
 	intel_ring_emit(ring, 0);
 	intel_ring_advance(ring);
@@ -1004,7 +1007,8 @@ i830_dispatch_execbuffer(struct intel_ring_buffer *ring,
 
 static int
 i915_dispatch_execbuffer(struct intel_ring_buffer *ring,
-			 u32 offset, u32 len)
+			 u32 offset, u32 len,
+			 unsigned flags)
 {
 	int ret;
 
@@ -1013,7 +1017,7 @@ i915_dispatch_execbuffer(struct intel_ring_buffer *ring,
 		return ret;
 
 	intel_ring_emit(ring, MI_BATCH_BUFFER_START | MI_BATCH_GTT);
-	intel_ring_emit(ring, offset | MI_BATCH_NON_SECURE);
+	intel_ring_emit(ring, offset | (flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE));
 	intel_ring_advance(ring);
 
 	return 0;
@@ -1403,8 +1407,30 @@ static int gen6_ring_flush(struct intel_ring_buffer *ring,
 }
 
 static int
+hsw_ring_dispatch_execbuffer(struct intel_ring_buffer *ring,
+			     u32 offset, u32 len,
+			     unsigned flags)
+{
+	int ret;
+
+	ret = intel_ring_begin(ring, 2);
+	if (ret)
+		return ret;
+
+	intel_ring_emit(ring,
+			MI_BATCH_BUFFER_START | MI_BATCH_PPGTT_HSW |
+			(flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE_HSW));
+	/* bit0-7 is the length on GEN6+ */
+	intel_ring_emit(ring, offset);
+	intel_ring_advance(ring);
+
+	return 0;
+}
+
+static int
 gen6_ring_dispatch_execbuffer(struct intel_ring_buffer *ring,
-			      u32 offset, u32 len)
+			      u32 offset, u32 len,
+			      unsigned flags)
 {
 	int ret;
 
@@ -1412,7 +1438,9 @@ gen6_ring_dispatch_execbuffer(struct intel_ring_buffer *ring,
 	if (ret)
 		return ret;
 
-	intel_ring_emit(ring, MI_BATCH_BUFFER_START | MI_BATCH_NON_SECURE_I965);
+	intel_ring_emit(ring,
+			MI_BATCH_BUFFER_START |
+			(flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE_I965));
 	/* bit0-7 is the length on GEN6+ */
 	intel_ring_emit(ring, offset);
 	intel_ring_advance(ring);
@@ -1491,7 +1519,9 @@ int intel_init_render_ring_buffer(struct drm_device *dev)
 		ring->irq_enable_mask = I915_USER_INTERRUPT;
 	}
 	ring->write_tail = ring_write_tail;
-	if (INTEL_INFO(dev)->gen >= 6)
+	if (IS_HASWELL(dev))
+		ring->dispatch_execbuffer = hsw_ring_dispatch_execbuffer;
+	else if (INTEL_INFO(dev)->gen >= 6)
 		ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
 	else if (INTEL_INFO(dev)->gen >= 4)
 		ring->dispatch_execbuffer = i965_dispatch_execbuffer;