author     John Harrison <John.C.Harrison@Intel.com>  2015-02-13 06:48:10 -0500
committer  Daniel Vetter <daniel.vetter@ffwll.ch>      2015-02-25 16:43:29 -0500
commit     8e004efc16541e7f6e35673449195db5d1f92f40 (patch)
tree       7b0d48fb698ef222a3d7c8a4d1493ef4234380a0
parent     06dc68d68090ce6da12f7fde03e64a32f0f6e5d0 (diff)
drm/i915: Rename 'flags' to 'dispatch_flags' for better code reading
There is a flags word that is passed through the execbuffer code path all the
way from initial decoding of the user parameters down to the very final
dispatch buffer call. It is simply called 'flags'. Unfortunately, there are
many other flags words floating around in the same blocks of code. Even more
once the GPU scheduler arrives. This patch makes it more obvious exactly
which flags word is which by renaming 'flags' to 'dispatch_flags'.

Note that the bit definitions for this flags word already have an
'I915_DISPATCH_' prefix on them and so are not quite so ambiguous.

OTC-Jira: VIZ-1587
Signed-off-by: John Harrison <John.C.Harrison@Intel.com>
[danvet: Resolve conflict with Chris' rework of the bb parsing.]
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
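For orientation before the diff: the renamed word is the one built from the
user-visible execbuffer flags (args->flags) and then carried down to the
per-ring dispatch hook. The sketch below is illustrative only -- the helper
name is invented for this note, the permission check and other details are
omitted, and i915_gem_do_execbuffer() actually does this inline, as the hunks
below show. It assumes the i915 definitions of I915_EXEC_* (uapi
drm/i915_drm.h) and I915_DISPATCH_* (intel_ringbuffer.h).

    /* Illustrative sketch, not driver code: translating the userspace
     * execbuffer flags word into the internal dispatch_flags word. */
    static u32 translate_exec_flags(const struct drm_i915_gem_execbuffer2 *args)
    {
            u32 dispatch_flags = 0;         /* formerly just 'flags' */

            if (args->flags & I915_EXEC_SECURE)             /* userspace-facing bit... */
                    dispatch_flags |= I915_DISPATCH_SECURE; /* ...internal dispatch bit */
            if (args->flags & I915_EXEC_IS_PINNED)
                    dispatch_flags |= I915_DISPATCH_PINNED;

            return dispatch_flags;
    }

With the rename, the name of the parameter matches the I915_DISPATCH_* bits
it carries, so it can no longer be confused with args->flags or other flags
words along the same path.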
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_execbuffer.c  25
-rw-r--r--  drivers/gpu/drm/i915/intel_lrc.c            10
-rw-r--r--  drivers/gpu/drm/i915/intel_lrc.h             2
-rw-r--r--  drivers/gpu/drm/i915/intel_ringbuffer.c     35
-rw-r--r--  drivers/gpu/drm/i915/intel_ringbuffer.h      4
5 files changed, 41 insertions(+), 35 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 82636aa7052d..85a6adaba258 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -1127,7 +1127,7 @@ i915_gem_ringbuffer_submission(struct drm_device *dev, struct drm_file *file,
                                struct drm_i915_gem_execbuffer2 *args,
                                struct list_head *vmas,
                                struct drm_i915_gem_object *batch_obj,
-                               u64 exec_start, u32 flags)
+                               u64 exec_start, u32 dispatch_flags)
 {
         struct drm_clip_rect *cliprects = NULL;
         struct drm_i915_private *dev_priv = dev->dev_private;
@@ -1255,19 +1255,19 @@ i915_gem_ringbuffer_submission(struct drm_device *dev, struct drm_file *file,
 
                         ret = ring->dispatch_execbuffer(ring,
                                                         exec_start, exec_len,
-                                                        flags);
+                                                        dispatch_flags);
                         if (ret)
                                 goto error;
                 }
         } else {
                 ret = ring->dispatch_execbuffer(ring,
                                                 exec_start, exec_len,
-                                                flags);
+                                                dispatch_flags);
                 if (ret)
                         return ret;
         }
 
-        trace_i915_gem_ring_dispatch(intel_ring_get_request(ring), flags);
+        trace_i915_gem_ring_dispatch(intel_ring_get_request(ring), dispatch_flags);
 
         i915_gem_execbuffer_move_to_active(vmas, ring);
         i915_gem_execbuffer_retire_commands(dev, file, ring, batch_obj);
@@ -1342,7 +1342,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
         struct i915_address_space *vm;
         const u32 ctx_id = i915_execbuffer2_get_context_id(*args);
         u64 exec_start = args->batch_start_offset;
-        u32 flags;
+        u32 dispatch_flags;
         int ret;
         bool need_relocs;
 
@@ -1353,15 +1353,15 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
         if (ret)
                 return ret;
 
-        flags = 0;
+        dispatch_flags = 0;
         if (args->flags & I915_EXEC_SECURE) {
                 if (!file->is_master || !capable(CAP_SYS_ADMIN))
                         return -EPERM;
 
-                flags |= I915_DISPATCH_SECURE;
+                dispatch_flags |= I915_DISPATCH_SECURE;
         }
         if (args->flags & I915_EXEC_IS_PINNED)
-                flags |= I915_DISPATCH_PINNED;
+                dispatch_flags |= I915_DISPATCH_PINNED;
 
         if ((args->flags & I915_EXEC_RING_MASK) > LAST_USER_RING) {
                 DRM_DEBUG("execbuf with unknown ring: %d\n",
@@ -1501,7 +1501,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
                  * this check when that is fixed.
                  */
                 if (USES_FULL_PPGTT(dev))
-                        flags |= I915_DISPATCH_SECURE;
+                        dispatch_flags |= I915_DISPATCH_SECURE;
 
                 exec_start = 0;
         }
@@ -1511,7 +1511,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
         /* snb/ivb/vlv conflate the "batch in ppgtt" bit with the "non-secure
          * batch" bit. Hence we need to pin secure batches into the global gtt.
          * hsw should have this fixed, but bdw mucks it up again. */
-        if (flags & I915_DISPATCH_SECURE) {
+        if (dispatch_flags & I915_DISPATCH_SECURE) {
                 /*
                  * So on first glance it looks freaky that we pin the batch here
                  * outside of the reservation loop. But:
@@ -1531,7 +1531,8 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
         exec_start += i915_gem_obj_offset(batch_obj, vm);
 
         ret = dev_priv->gt.do_execbuf(dev, file, ring, ctx, args,
-                                      &eb->vmas, batch_obj, exec_start, flags);
+                                      &eb->vmas, batch_obj, exec_start,
+                                      dispatch_flags);
 
         /*
          * FIXME: We crucially rely upon the active tracking for the (ppgtt)
@@ -1539,7 +1540,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
          * needs to be adjusted to also track the ggtt batch vma properly as
          * active.
          */
-        if (flags & I915_DISPATCH_SECURE)
+        if (dispatch_flags & I915_DISPATCH_SECURE)
                 i915_gem_object_ggtt_unpin(batch_obj);
 err:
         /* the request owns the ref now */
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index 9ef5fcde1300..82c6aaf05803 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -620,7 +620,7 @@ static int execlists_move_to_gpu(struct intel_ringbuffer *ringbuf,
  * @vmas: list of vmas.
  * @batch_obj: the batchbuffer to submit.
  * @exec_start: batchbuffer start virtual address pointer.
- * @flags: translated execbuffer call flags.
+ * @dispatch_flags: translated execbuffer call flags.
  *
  * This is the evil twin version of i915_gem_ringbuffer_submission. It abstracts
  * away the submission details of the execbuffer ioctl call.
@@ -633,7 +633,7 @@ int intel_execlists_submission(struct drm_device *dev, struct drm_file *file,
                                struct drm_i915_gem_execbuffer2 *args,
                                struct list_head *vmas,
                                struct drm_i915_gem_object *batch_obj,
-                               u64 exec_start, u32 flags)
+                               u64 exec_start, u32 dispatch_flags)
 {
         struct drm_i915_private *dev_priv = dev->dev_private;
         struct intel_ringbuffer *ringbuf = ctx->engine[ring->id].ringbuf;
@@ -706,7 +706,7 @@ int intel_execlists_submission(struct drm_device *dev, struct drm_file *file,
                 dev_priv->relative_constants_mode = instp_mode;
         }
 
-        ret = ring->emit_bb_start(ringbuf, ctx, exec_start, flags);
+        ret = ring->emit_bb_start(ringbuf, ctx, exec_start, dispatch_flags);
         if (ret)
                 return ret;
 
@@ -1163,9 +1163,9 @@ static int gen9_init_render_ring(struct intel_engine_cs *ring)
 
 static int gen8_emit_bb_start(struct intel_ringbuffer *ringbuf,
                               struct intel_context *ctx,
-                              u64 offset, unsigned flags)
+                              u64 offset, unsigned dispatch_flags)
 {
-        bool ppgtt = !(flags & I915_DISPATCH_SECURE);
+        bool ppgtt = !(dispatch_flags & I915_DISPATCH_SECURE);
         int ret;
 
         ret = intel_logical_ring_begin(ringbuf, ctx, 4);
diff --git a/drivers/gpu/drm/i915/intel_lrc.h b/drivers/gpu/drm/i915/intel_lrc.h
index 5dd0ecaf6128..adb731e49c57 100644
--- a/drivers/gpu/drm/i915/intel_lrc.h
+++ b/drivers/gpu/drm/i915/intel_lrc.h
@@ -84,7 +84,7 @@ int intel_execlists_submission(struct drm_device *dev, struct drm_file *file,
                                struct drm_i915_gem_execbuffer2 *args,
                                struct list_head *vmas,
                                struct drm_i915_gem_object *batch_obj,
-                               u64 exec_start, u32 flags);
+                               u64 exec_start, u32 dispatch_flags);
 u32 intel_execlists_ctx_id(struct drm_i915_gem_object *ctx_obj);
 
 void intel_lrc_irq_handler(struct intel_engine_cs *ring);
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 665985d5fcf4..4a4a7aec0fc3 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -1741,7 +1741,7 @@ gen8_ring_put_irq(struct intel_engine_cs *ring)
 static int
 i965_dispatch_execbuffer(struct intel_engine_cs *ring,
                          u64 offset, u32 length,
-                         unsigned flags)
+                         unsigned dispatch_flags)
 {
         int ret;
 
@@ -1752,7 +1752,8 @@ i965_dispatch_execbuffer(struct intel_engine_cs *ring,
         intel_ring_emit(ring,
                         MI_BATCH_BUFFER_START |
                         MI_BATCH_GTT |
-                        (flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE_I965));
+                        (dispatch_flags & I915_DISPATCH_SECURE ?
+                         0 : MI_BATCH_NON_SECURE_I965));
         intel_ring_emit(ring, offset);
         intel_ring_advance(ring);
 
@@ -1765,8 +1766,8 @@ i965_dispatch_execbuffer(struct intel_engine_cs *ring,
 #define I830_WA_SIZE max(I830_TLB_ENTRIES*4096, I830_BATCH_LIMIT)
 static int
 i830_dispatch_execbuffer(struct intel_engine_cs *ring,
-                         u64 offset, u32 len,
-                         unsigned flags)
+                         u64 offset, u32 len,
+                         unsigned dispatch_flags)
 {
         u32 cs_offset = ring->scratch.gtt_offset;
         int ret;
@@ -1784,7 +1785,7 @@ i830_dispatch_execbuffer(struct intel_engine_cs *ring,
         intel_ring_emit(ring, MI_NOOP);
         intel_ring_advance(ring);
 
-        if ((flags & I915_DISPATCH_PINNED) == 0) {
+        if ((dispatch_flags & I915_DISPATCH_PINNED) == 0) {
                 if (len > I830_BATCH_LIMIT)
                         return -ENOSPC;
 
@@ -1816,7 +1817,8 @@ i830_dispatch_execbuffer(struct intel_engine_cs *ring,
                 return ret;
 
         intel_ring_emit(ring, MI_BATCH_BUFFER);
-        intel_ring_emit(ring, offset | (flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE));
+        intel_ring_emit(ring, offset | (dispatch_flags & I915_DISPATCH_SECURE ?
+                                        0 : MI_BATCH_NON_SECURE));
         intel_ring_emit(ring, offset + len - 8);
         intel_ring_emit(ring, MI_NOOP);
         intel_ring_advance(ring);
@@ -1827,7 +1829,7 @@ i830_dispatch_execbuffer(struct intel_engine_cs *ring,
 static int
 i915_dispatch_execbuffer(struct intel_engine_cs *ring,
                          u64 offset, u32 len,
-                         unsigned flags)
+                         unsigned dispatch_flags)
 {
         int ret;
 
@@ -1836,7 +1838,8 @@ i915_dispatch_execbuffer(struct intel_engine_cs *ring,
         return ret;
 
         intel_ring_emit(ring, MI_BATCH_BUFFER_START | MI_BATCH_GTT);
-        intel_ring_emit(ring, offset | (flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE));
+        intel_ring_emit(ring, offset | (dispatch_flags & I915_DISPATCH_SECURE ?
+                                        0 : MI_BATCH_NON_SECURE));
         intel_ring_advance(ring);
 
         return 0;
@@ -2395,9 +2398,10 @@ static int gen6_bsd_ring_flush(struct intel_engine_cs *ring,
 static int
 gen8_ring_dispatch_execbuffer(struct intel_engine_cs *ring,
                               u64 offset, u32 len,
-                              unsigned flags)
+                              unsigned dispatch_flags)
 {
-        bool ppgtt = USES_PPGTT(ring->dev) && !(flags & I915_DISPATCH_SECURE);
+        bool ppgtt = USES_PPGTT(ring->dev) &&
+                     !(dispatch_flags & I915_DISPATCH_SECURE);
         int ret;
 
         ret = intel_ring_begin(ring, 4);
@@ -2416,8 +2420,8 @@ gen8_ring_dispatch_execbuffer(struct intel_engine_cs *ring,
 
 static int
 hsw_ring_dispatch_execbuffer(struct intel_engine_cs *ring,
-                             u64 offset, u32 len,
-                             unsigned flags)
+                             u64 offset, u32 len,
+                             unsigned dispatch_flags)
 {
         int ret;
 
@@ -2427,7 +2431,7 @@ hsw_ring_dispatch_execbuffer(struct intel_engine_cs *ring,
 
         intel_ring_emit(ring,
                         MI_BATCH_BUFFER_START |
-                        (flags & I915_DISPATCH_SECURE ?
+                        (dispatch_flags & I915_DISPATCH_SECURE ?
                          0 : MI_BATCH_PPGTT_HSW | MI_BATCH_NON_SECURE_HSW));
         /* bit0-7 is the length on GEN6+ */
         intel_ring_emit(ring, offset);
@@ -2439,7 +2443,7 @@ hsw_ring_dispatch_execbuffer(struct intel_engine_cs *ring,
 static int
 gen6_ring_dispatch_execbuffer(struct intel_engine_cs *ring,
                               u64 offset, u32 len,
-                              unsigned flags)
+                              unsigned dispatch_flags)
 {
         int ret;
 
@@ -2449,7 +2453,8 @@ gen6_ring_dispatch_execbuffer(struct intel_engine_cs *ring,
 
         intel_ring_emit(ring,
                         MI_BATCH_BUFFER_START |
-                        (flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE_I965));
+                        (dispatch_flags & I915_DISPATCH_SECURE ?
+                         0 : MI_BATCH_NON_SECURE_I965));
         /* bit0-7 is the length on GEN6+ */
         intel_ring_emit(ring, offset);
         intel_ring_advance(ring);
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index 39183fcbdcf3..8f3b49a23ccf 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -164,7 +164,7 @@ struct intel_engine_cs {
                                      u32 seqno);
         int             (*dispatch_execbuffer)(struct intel_engine_cs *ring,
                                                u64 offset, u32 length,
-                                               unsigned flags);
+                                               unsigned dispatch_flags);
 #define I915_DISPATCH_SECURE 0x1
 #define I915_DISPATCH_PINNED 0x2
         void            (*cleanup)(struct intel_engine_cs *ring);
@@ -242,7 +242,7 @@ struct intel_engine_cs {
                                       u32 flush_domains);
         int             (*emit_bb_start)(struct intel_ringbuffer *ringbuf,
                                          struct intel_context *ctx,
-                                         u64 offset, unsigned flags);
+                                         u64 offset, unsigned dispatch_flags);
 
         /**
          * List of objects currently involved in rendering from the