author		Chris Wilson <chris@chris-wilson.co.uk>	2012-10-17 07:09:54 -0400
committer	Daniel Vetter <daniel.vetter@ffwll.ch>	2012-10-17 15:06:59 -0400
commit		d7d4eeddb8f72342f70621c4b3cb718af9361712 (patch)
tree		e8a11c37fa8dbaf9e93859e91812ff131ca0e20b /drivers/gpu/drm/i915
parent		76e438303403f301f3509479b544e41518edd059 (diff)
drm/i915: Allow DRM_ROOT_ONLY|DRM_MASTER to submit privileged batchbuffers
With the introduction of per-process GTT space, the hardware designers
thought it wise to also limit the ability to write to MMIO space to only
a "secure" batch buffer. The ability to rewrite registers is the only
way to program the hardware to perform certain operations like scanline
waits (required for tear-free windowed updates). So we have a choice:
either add an interface to perform those synchronized updates inside the
kernel, or permit certain processes to write to the "safe" registers
from within their command streams. This patch exposes the ability to
submit a SECURE batch buffer to DRM_ROOT_ONLY|DRM_MASTER processes.

v2: Haswell split up bit8 into a ppgtt bit (still bit8) and a security
bit (bit 13, accidentally not set). Also add a comment explaining why
secure batches need a global gtt binding.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk> (v1)
[danvet: added hsw fixup.]
Reviewed-by: Jesse Barnes <jbarnes@virtuousgeek.org>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
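For illustration, a minimal userspace sketch of the new interface (not
part of this patch): the caller must already be DRM master with
CAP_SYS_ADMIN, the helper name is hypothetical, and the surrounding
execbuffer setup is assumed to exist.

/* Sketch only: probe for secure-batch support, then dispatch a batch
 * that may write MMIO registers. Assumes fd is an open DRM master node
 * and execbuf is a fully populated drm_i915_gem_execbuffer2. */
#include <sys/ioctl.h>
#include <drm/i915_drm.h>

static int submit_secure_batch(int fd, struct drm_i915_gem_execbuffer2 *execbuf)
{
	drm_i915_getparam_t gp;
	int has_secure = 0;

	gp.param = I915_PARAM_HAS_SECURE_BATCHES;
	gp.value = &has_secure;
	if (ioctl(fd, DRM_IOCTL_I915_GETPARAM, &gp) || !has_secure)
		return -1;

	/* The kernel rejects this with -EPERM unless the caller is
	 * both DRM master and CAP_SYS_ADMIN capable. */
	execbuf->flags |= I915_EXEC_SECURE;
	return ioctl(fd, DRM_IOCTL_I915_GEM_EXECBUFFER2, execbuf);
}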
Diffstat (limited to 'drivers/gpu/drm/i915')
-rw-r--r--	drivers/gpu/drm/i915/i915_dma.c			 3
-rw-r--r--	drivers/gpu/drm/i915/i915_gem_execbuffer.c	25
-rw-r--r--	drivers/gpu/drm/i915/i915_reg.h			 7
-rw-r--r--	drivers/gpu/drm/i915/i915_trace.h		10
-rw-r--r--	drivers/gpu/drm/i915/intel_ringbuffer.c		48
-rw-r--r--	drivers/gpu/drm/i915/intel_ringbuffer.h		 4
6 files changed, 78 insertions(+), 19 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index 491394fd94cd..14271aab72bb 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -1015,6 +1015,9 @@ static int i915_getparam(struct drm_device *dev, void *data,
 	case I915_PARAM_HAS_PRIME_VMAP_FLUSH:
 		value = 1;
 		break;
+	case I915_PARAM_HAS_SECURE_BATCHES:
+		value = capable(CAP_SYS_ADMIN);
+		break;
 	default:
 		DRM_DEBUG_DRIVER("Unknown parameter %d\n",
 				 param->param);
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 6a2f3e50c714..afbc9240a992 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -801,6 +801,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 	u32 exec_start, exec_len;
 	u32 seqno;
 	u32 mask;
+	u32 flags;
 	int ret, mode, i;
 
 	if (!i915_gem_check_execbuffer(args)) {
@@ -812,6 +813,14 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 	if (ret)
 		return ret;
 
+	flags = 0;
+	if (args->flags & I915_EXEC_SECURE) {
+		if (!file->is_master || !capable(CAP_SYS_ADMIN))
+			return -EPERM;
+
+		flags |= I915_DISPATCH_SECURE;
+	}
+
 	switch (args->flags & I915_EXEC_RING_MASK) {
 	case I915_EXEC_DEFAULT:
 	case I915_EXEC_RENDER:
@@ -984,6 +993,13 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 	}
 	batch_obj->base.pending_read_domains |= I915_GEM_DOMAIN_COMMAND;
 
+	/* snb/ivb/vlv conflate the "batch in ppgtt" bit with the "non-secure
+	 * batch" bit. Hence we need to pin secure batches into the global gtt.
+	 * hsw should have this fixed, but let's be paranoid and do it
+	 * unconditionally for now. */
+	if (flags & I915_DISPATCH_SECURE && !batch_obj->has_global_gtt_mapping)
+		i915_gem_gtt_bind_object(batch_obj, batch_obj->cache_level);
+
 	ret = i915_gem_execbuffer_move_to_gpu(ring, &objects);
 	if (ret)
 		goto err;
@@ -1029,7 +1045,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 		goto err;
 	}
 
-	trace_i915_gem_ring_dispatch(ring, seqno);
+	trace_i915_gem_ring_dispatch(ring, seqno, flags);
 
 	exec_start = batch_obj->gtt_offset + args->batch_start_offset;
 	exec_len = args->batch_len;
@@ -1041,12 +1057,15 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 				goto err;
 
 			ret = ring->dispatch_execbuffer(ring,
-							exec_start, exec_len);
+							exec_start, exec_len,
+							flags);
 			if (ret)
 				goto err;
 		}
 	} else {
-		ret = ring->dispatch_execbuffer(ring, exec_start, exec_len);
+		ret = ring->dispatch_execbuffer(ring,
+						exec_start, exec_len,
+						flags);
 		if (ret)
 			goto err;
 	}
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index d7f516c4855a..455beb4f690f 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -244,8 +244,11 @@
 #define   MI_INVALIDATE_TLB		(1<<18)
 #define   MI_INVALIDATE_BSD		(1<<7)
 #define MI_BATCH_BUFFER		MI_INSTR(0x30, 1)
 #define   MI_BATCH_NON_SECURE		(1)
+/* for snb/ivb/vlv this also means "batch in ppgtt" when ppgtt is enabled. */
 #define   MI_BATCH_NON_SECURE_I965	(1<<8)
+#define   MI_BATCH_PPGTT_HSW		(1<<8)
+#define   MI_BATCH_NON_SECURE_HSW	(1<<13)
 #define MI_BATCH_BUFFER_START	MI_INSTR(0x31, 0)
 #define   MI_BATCH_GTT		(2<<6) /* aliased with (1<<7) on gen4 */
 #define MI_SEMAPHORE_MBOX	MI_INSTR(0x16, 1) /* gen6+ */
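An aside on the define split above (a sketch, not part of the patch): on
snb/ivb/vlv a single bit8 encodes both "non-secure" and, when ppgtt is
enabled, "batch in ppgtt", while hsw moves security to bit13 and leaves
bit8 as a pure ppgtt select. The hypothetical helper below only restates
how these defines combine; is_haswell stands in for IS_HASWELL(dev).

/* Hypothetical illustration of the per-platform MI_BATCH_BUFFER_START
 * flag word on gen6+; mirrors the dispatch functions further down. */
static u32 batch_start_flags(bool is_haswell, bool secure)
{
	if (is_haswell)
		return MI_BATCH_BUFFER_START | MI_BATCH_PPGTT_HSW |
		       (secure ? 0 : MI_BATCH_NON_SECURE_HSW);
	/* snb/ivb/vlv: clearing bit8 makes the batch secure */
	return MI_BATCH_BUFFER_START |
	       (secure ? 0 : MI_BATCH_NON_SECURE_I965);
}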
diff --git a/drivers/gpu/drm/i915/i915_trace.h b/drivers/gpu/drm/i915/i915_trace.h
index 8134421b89a6..3db4a6817713 100644
--- a/drivers/gpu/drm/i915/i915_trace.h
+++ b/drivers/gpu/drm/i915/i915_trace.h
@@ -229,24 +229,26 @@ TRACE_EVENT(i915_gem_evict_everything,
 );
 
 TRACE_EVENT(i915_gem_ring_dispatch,
-	    TP_PROTO(struct intel_ring_buffer *ring, u32 seqno),
-	    TP_ARGS(ring, seqno),
+	    TP_PROTO(struct intel_ring_buffer *ring, u32 seqno, u32 flags),
+	    TP_ARGS(ring, seqno, flags),
 
 	    TP_STRUCT__entry(
 			     __field(u32, dev)
 			     __field(u32, ring)
 			     __field(u32, seqno)
+			     __field(u32, flags)
 			     ),
 
 	    TP_fast_assign(
 			   __entry->dev = ring->dev->primary->index;
 			   __entry->ring = ring->id;
 			   __entry->seqno = seqno;
+			   __entry->flags = flags;
 			   i915_trace_irq_get(ring, seqno);
 			   ),
 
-	    TP_printk("dev=%u, ring=%u, seqno=%u",
-		      __entry->dev, __entry->ring, __entry->seqno)
+	    TP_printk("dev=%u, ring=%u, seqno=%u, flags=%x",
+		      __entry->dev, __entry->ring, __entry->seqno, __entry->flags)
 );
 
 TRACE_EVENT(i915_gem_ring_flush,
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 984a0c5fbf5d..6c6f95a534b1 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -965,7 +965,9 @@ gen6_ring_put_irq(struct intel_ring_buffer *ring)
 }
 
 static int
-i965_dispatch_execbuffer(struct intel_ring_buffer *ring, u32 offset, u32 length)
+i965_dispatch_execbuffer(struct intel_ring_buffer *ring,
+			 u32 offset, u32 length,
+			 unsigned flags)
 {
 	int ret;
 
@@ -976,7 +978,7 @@ i965_dispatch_execbuffer(struct intel_ring_buffer *ring, u32 offset, u32 length)
 	intel_ring_emit(ring,
 			MI_BATCH_BUFFER_START |
 			MI_BATCH_GTT |
-			MI_BATCH_NON_SECURE_I965);
+			(flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE_I965));
 	intel_ring_emit(ring, offset);
 	intel_ring_advance(ring);
 
@@ -985,7 +987,8 @@ i965_dispatch_execbuffer(struct intel_ring_buffer *ring, u32 offset, u32 length)
 
 static int
 i830_dispatch_execbuffer(struct intel_ring_buffer *ring,
-			 u32 offset, u32 len)
+			 u32 offset, u32 len,
+			 unsigned flags)
 {
 	int ret;
 
@@ -994,7 +997,7 @@ i830_dispatch_execbuffer(struct intel_ring_buffer *ring,
 		return ret;
 
 	intel_ring_emit(ring, MI_BATCH_BUFFER);
-	intel_ring_emit(ring, offset | MI_BATCH_NON_SECURE);
+	intel_ring_emit(ring, offset | (flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE));
 	intel_ring_emit(ring, offset + len - 8);
 	intel_ring_emit(ring, 0);
 	intel_ring_advance(ring);
@@ -1004,7 +1007,8 @@ i830_dispatch_execbuffer(struct intel_ring_buffer *ring,
 
 static int
 i915_dispatch_execbuffer(struct intel_ring_buffer *ring,
-			 u32 offset, u32 len)
+			 u32 offset, u32 len,
+			 unsigned flags)
 {
 	int ret;
 
@@ -1013,7 +1017,7 @@ i915_dispatch_execbuffer(struct intel_ring_buffer *ring,
 		return ret;
 
 	intel_ring_emit(ring, MI_BATCH_BUFFER_START | MI_BATCH_GTT);
-	intel_ring_emit(ring, offset | MI_BATCH_NON_SECURE);
+	intel_ring_emit(ring, offset | (flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE));
 	intel_ring_advance(ring);
 
 	return 0;
@@ -1403,8 +1407,30 @@ static int gen6_ring_flush(struct intel_ring_buffer *ring,
 }
 
 static int
+hsw_ring_dispatch_execbuffer(struct intel_ring_buffer *ring,
+			     u32 offset, u32 len,
+			     unsigned flags)
+{
+	int ret;
+
+	ret = intel_ring_begin(ring, 2);
+	if (ret)
+		return ret;
+
+	intel_ring_emit(ring,
+			MI_BATCH_BUFFER_START | MI_BATCH_PPGTT_HSW |
+			(flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE_HSW));
+	/* bit0-7 is the length on GEN6+ */
+	intel_ring_emit(ring, offset);
+	intel_ring_advance(ring);
+
+	return 0;
+}
+
+static int
 gen6_ring_dispatch_execbuffer(struct intel_ring_buffer *ring,
-			      u32 offset, u32 len)
+			      u32 offset, u32 len,
+			      unsigned flags)
 {
 	int ret;
 
@@ -1412,7 +1438,9 @@ gen6_ring_dispatch_execbuffer(struct intel_ring_buffer *ring,
 	if (ret)
 		return ret;
 
-	intel_ring_emit(ring, MI_BATCH_BUFFER_START | MI_BATCH_NON_SECURE_I965);
+	intel_ring_emit(ring,
+			MI_BATCH_BUFFER_START |
+			(flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE_I965));
 	/* bit0-7 is the length on GEN6+ */
 	intel_ring_emit(ring, offset);
 	intel_ring_advance(ring);
@@ -1491,7 +1519,9 @@ int intel_init_render_ring_buffer(struct drm_device *dev)
 		ring->irq_enable_mask = I915_USER_INTERRUPT;
 	}
 	ring->write_tail = ring_write_tail;
-	if (INTEL_INFO(dev)->gen >= 6)
+	if (IS_HASWELL(dev))
+		ring->dispatch_execbuffer = hsw_ring_dispatch_execbuffer;
+	else if (INTEL_INFO(dev)->gen >= 6)
 		ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
 	else if (INTEL_INFO(dev)->gen >= 4)
 		ring->dispatch_execbuffer = i965_dispatch_execbuffer;
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index 2ea7a311a1f0..3745d1dc1fa1 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -81,7 +81,9 @@ struct intel_ring_buffer {
 	u32		(*get_seqno)(struct intel_ring_buffer *ring,
 				     bool lazy_coherency);
 	int		(*dispatch_execbuffer)(struct intel_ring_buffer *ring,
-					       u32 offset, u32 length);
+					       u32 offset, u32 length,
+					       unsigned flags);
+#define I915_DISPATCH_SECURE 0x1
 	void		(*cleanup)(struct intel_ring_buffer *ring);
 	int		(*sync_to)(struct intel_ring_buffer *ring,
 				   struct intel_ring_buffer *to,