author		John Harrison <John.C.Harrison@Intel.com>	2015-05-29 12:43:57 -0400
committer	Daniel Vetter <daniel.vetter@ffwll.ch>		2015-06-23 08:02:21 -0400
commit		a84c3ae168837dbedd0bde76a536360e84ae863a (patch)
tree		1671b7eac2c6f2faa4587f5f8268f470494b8dd3 /drivers/gpu/drm/i915/intel_ringbuffer.c
parent		e85b26dc1ca5ecbf6456c61a131a986a755cbc69 (diff)
drm/i915: Update ring->flush() to take a requests structure
Updated the various ring->flush() functions to take a request instead of a ring.
Also updated the tracer to include the request id.

For: VIZ-5115
Signed-off-by: John Harrison <John.C.Harrison@Intel.com>
Reviewed-by: Tomas Elf <tomas.elf@intel.com>
[danvet: Rebase since I didn't merge the addition of req->uniq.]
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
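
The conversion follows one mechanical pattern in every flush implementation touched below: the engine pointer in the signature is replaced by the request, and the first statement of the body recovers the engine from it. A minimal sketch of that pattern, using simplified stand-in structs rather than the kernel's real definitions:

/*
 * Illustration only: hypothetical stand-ins, not the kernel's actual
 * struct definitions (which carry far more state).
 */
#include <stdint.h>

struct intel_engine_cs;			/* engine (ring) state, opaque here */

struct drm_i915_gem_request {
	struct intel_engine_cs *ring;	/* engine this request executes on */
};

/*
 * Before: int flush(struct intel_engine_cs *ring, u32 invalidate, u32 flush);
 * After:  the request is passed in and the engine is derived from it locally,
 *         so the rest of the function body stays unchanged.
 */
static int example_ring_flush(struct drm_i915_gem_request *req,
			      uint32_t invalidate_domains,
			      uint32_t flush_domains)
{
	struct intel_engine_cs *ring = req->ring;

	/* ... emit the invalidate/flush commands on 'ring' exactly as before ... */
	(void)ring; (void)invalidate_domains; (void)flush_domains;
	return 0;
}

Callers that already hold the request, such as intel_ring_flush_all_caches() and intel_ring_invalidate_all_caches() at the bottom of the file, now pass req straight through to ring->flush() and to the tracepoint.
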
Diffstat (limited to 'drivers/gpu/drm/i915/intel_ringbuffer.c')
-rw-r--r--	drivers/gpu/drm/i915/intel_ringbuffer.c	34
1 file changed, 21 insertions(+), 13 deletions(-)
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 48ca73e7aaa6..2425dc2db42c 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -91,10 +91,11 @@ static void __intel_ring_advance(struct intel_engine_cs *ring)
 }
 
 static int
-gen2_render_ring_flush(struct intel_engine_cs *ring,
+gen2_render_ring_flush(struct drm_i915_gem_request *req,
 		       u32 invalidate_domains,
 		       u32 flush_domains)
 {
+	struct intel_engine_cs *ring = req->ring;
 	u32 cmd;
 	int ret;
 
@@ -117,10 +118,11 @@ gen2_render_ring_flush(struct intel_engine_cs *ring,
 }
 
 static int
-gen4_render_ring_flush(struct intel_engine_cs *ring,
+gen4_render_ring_flush(struct drm_i915_gem_request *req,
 		       u32 invalidate_domains,
 		       u32 flush_domains)
 {
+	struct intel_engine_cs *ring = req->ring;
 	struct drm_device *dev = ring->dev;
 	u32 cmd;
 	int ret;
@@ -247,9 +249,10 @@ intel_emit_post_sync_nonzero_flush(struct intel_engine_cs *ring)
 }
 
 static int
-gen6_render_ring_flush(struct intel_engine_cs *ring,
+gen6_render_ring_flush(struct drm_i915_gem_request *req,
 		       u32 invalidate_domains, u32 flush_domains)
 {
+	struct intel_engine_cs *ring = req->ring;
 	u32 flags = 0;
 	u32 scratch_addr = ring->scratch.gtt_offset + 2 * CACHELINE_BYTES;
 	int ret;
@@ -318,9 +321,10 @@ gen7_render_ring_cs_stall_wa(struct intel_engine_cs *ring)
 }
 
 static int
-gen7_render_ring_flush(struct intel_engine_cs *ring,
+gen7_render_ring_flush(struct drm_i915_gem_request *req,
 		       u32 invalidate_domains, u32 flush_domains)
 {
+	struct intel_engine_cs *ring = req->ring;
 	u32 flags = 0;
 	u32 scratch_addr = ring->scratch.gtt_offset + 2 * CACHELINE_BYTES;
 	int ret;
@@ -400,9 +404,10 @@ gen8_emit_pipe_control(struct intel_engine_cs *ring,
 }
 
 static int
-gen8_render_ring_flush(struct intel_engine_cs *ring,
+gen8_render_ring_flush(struct drm_i915_gem_request *req,
 		       u32 invalidate_domains, u32 flush_domains)
 {
+	struct intel_engine_cs *ring = req->ring;
 	u32 flags = 0;
 	u32 scratch_addr = ring->scratch.gtt_offset + 2 * CACHELINE_BYTES;
 	int ret;
@@ -1594,10 +1599,11 @@ i8xx_ring_put_irq(struct intel_engine_cs *ring)
 }
 
 static int
-bsd_ring_flush(struct intel_engine_cs *ring,
+bsd_ring_flush(struct drm_i915_gem_request *req,
 	       u32 invalidate_domains,
 	       u32 flush_domains)
 {
+	struct intel_engine_cs *ring = req->ring;
 	int ret;
 
 	ret = intel_ring_begin(ring, 2);
@@ -2372,9 +2378,10 @@ static void gen6_bsd_ring_write_tail(struct intel_engine_cs *ring,
 		   _MASKED_BIT_DISABLE(GEN6_BSD_SLEEP_MSG_DISABLE));
 }
 
-static int gen6_bsd_ring_flush(struct intel_engine_cs *ring,
+static int gen6_bsd_ring_flush(struct drm_i915_gem_request *req,
 			       u32 invalidate, u32 flush)
 {
+	struct intel_engine_cs *ring = req->ring;
 	uint32_t cmd;
 	int ret;
 
@@ -2484,9 +2491,10 @@ gen6_ring_dispatch_execbuffer(struct intel_engine_cs *ring,
 
 /* Blitter support (SandyBridge+) */
 
-static int gen6_ring_flush(struct intel_engine_cs *ring,
+static int gen6_ring_flush(struct drm_i915_gem_request *req,
 			   u32 invalidate, u32 flush)
 {
+	struct intel_engine_cs *ring = req->ring;
 	struct drm_device *dev = ring->dev;
 	uint32_t cmd;
 	int ret;
@@ -2900,11 +2908,11 @@ intel_ring_flush_all_caches(struct drm_i915_gem_request *req)
 	if (!ring->gpu_caches_dirty)
 		return 0;
 
-	ret = ring->flush(ring, 0, I915_GEM_GPU_DOMAINS);
+	ret = ring->flush(req, 0, I915_GEM_GPU_DOMAINS);
 	if (ret)
 		return ret;
 
-	trace_i915_gem_ring_flush(ring, 0, I915_GEM_GPU_DOMAINS);
+	trace_i915_gem_ring_flush(req, 0, I915_GEM_GPU_DOMAINS);
 
 	ring->gpu_caches_dirty = false;
 	return 0;
@@ -2921,11 +2929,11 @@ intel_ring_invalidate_all_caches(struct drm_i915_gem_request *req)
 	if (ring->gpu_caches_dirty)
 		flush_domains = I915_GEM_GPU_DOMAINS;
 
-	ret = ring->flush(ring, I915_GEM_GPU_DOMAINS, flush_domains);
+	ret = ring->flush(req, I915_GEM_GPU_DOMAINS, flush_domains);
 	if (ret)
 		return ret;
 
-	trace_i915_gem_ring_flush(ring, I915_GEM_GPU_DOMAINS, flush_domains);
+	trace_i915_gem_ring_flush(req, I915_GEM_GPU_DOMAINS, flush_domains);
 
 	ring->gpu_caches_dirty = false;
 	return 0;
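
For context, ->flush() is a per-engine function pointer, which is why every implementation above had to change in lockstep with the two wrappers in the last hunks. A sketch of the dispatch side under the same simplified stand-in types (not the kernel's real definitions; I915_GEM_GPU_DOMAINS is given a placeholder value):

#include <stdbool.h>
#include <stdint.h>

struct drm_i915_gem_request;

struct intel_engine_cs {
	bool gpu_caches_dirty;
	/* new vfunc shape: request first, engine derived inside */
	int (*flush)(struct drm_i915_gem_request *req,
		     uint32_t invalidate_domains,
		     uint32_t flush_domains);
};

struct drm_i915_gem_request {
	struct intel_engine_cs *ring;
};

#define I915_GEM_GPU_DOMAINS	0x3e	/* placeholder value for the sketch */

/* Mirrors the shape of intel_ring_flush_all_caches() after the change:
 * the wrapper already takes the request, so it hands it to the vfunc. */
static int flush_all_caches_sketch(struct drm_i915_gem_request *req)
{
	struct intel_engine_cs *ring = req->ring;
	int ret;

	if (!ring->gpu_caches_dirty)
		return 0;

	ret = ring->flush(req, 0, I915_GEM_GPU_DOMAINS);
	if (ret)
		return ret;

	ring->gpu_caches_dirty = false;
	return 0;
}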