about summary refs log tree commit diff stats
path: root/drivers/gpu/drm/i915/intel_ringbuffer.c
diff options
context:
space:
mode:
author	John Harrison <John.C.Harrison@Intel.com>	2015-05-29 12:44:07 -0400
committer	Daniel Vetter <daniel.vetter@ffwll.ch>	2015-06-23 08:02:29 -0400
commit	5fb9de1a2ea1968b57c906c6770794f1e7744828 (patch)
tree	378728ad7bce8217c51b4606dcf58a1839c1e34b /drivers/gpu/drm/i915/intel_ringbuffer.c
parent	bba09b12b47b31b147206f5784691d2fb8888bf1 (diff)
drm/i915: Update intel_ring_begin() to take a request structure
Now that everything above has been converted to use requests, intel_ring_begin() can be updated to take a request instead of a ring. This also means that it no longer needs to lazily allocate a request if no-one happens to have done it earlier.

For: VIZ-5115
Signed-off-by: John Harrison <John.C.Harrison@Intel.com>
Reviewed-by: Tomas Elf <tomas.elf@intel.com>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Diffstat (limited to 'drivers/gpu/drm/i915/intel_ringbuffer.c')
-rw-r--r--	drivers/gpu/drm/i915/intel_ringbuffer.c | 74
1 file changed, 36 insertions(+), 38 deletions(-)
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 6a77014e1d66..dfba3ee57382 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -106,7 +106,7 @@ gen2_render_ring_flush(struct drm_i915_gem_request *req,
 	if (invalidate_domains & I915_GEM_DOMAIN_SAMPLER)
 		cmd |= MI_READ_FLUSH;
 
-	ret = intel_ring_begin(ring, 2);
+	ret = intel_ring_begin(req, 2);
 	if (ret)
 		return ret;
 
@@ -165,7 +165,7 @@ gen4_render_ring_flush(struct drm_i915_gem_request *req,
 	    (IS_G4X(dev) || IS_GEN5(dev)))
 		cmd |= MI_INVALIDATE_ISP;
 
-	ret = intel_ring_begin(ring, 2);
+	ret = intel_ring_begin(req, 2);
 	if (ret)
 		return ret;
 
@@ -220,8 +220,7 @@ intel_emit_post_sync_nonzero_flush(struct drm_i915_gem_request *req)
 	u32 scratch_addr = ring->scratch.gtt_offset + 2 * CACHELINE_BYTES;
 	int ret;
 
-
-	ret = intel_ring_begin(ring, 6);
+	ret = intel_ring_begin(req, 6);
 	if (ret)
 		return ret;
 
@@ -234,7 +233,7 @@ intel_emit_post_sync_nonzero_flush(struct drm_i915_gem_request *req)
 	intel_ring_emit(ring, MI_NOOP);
 	intel_ring_advance(ring);
 
-	ret = intel_ring_begin(ring, 6);
+	ret = intel_ring_begin(req, 6);
 	if (ret)
 		return ret;
 
@@ -289,7 +288,7 @@ gen6_render_ring_flush(struct drm_i915_gem_request *req,
 		flags |= PIPE_CONTROL_QW_WRITE | PIPE_CONTROL_CS_STALL;
 	}
 
-	ret = intel_ring_begin(ring, 4);
+	ret = intel_ring_begin(req, 4);
 	if (ret)
 		return ret;
 
@@ -308,7 +307,7 @@ gen7_render_ring_cs_stall_wa(struct drm_i915_gem_request *req)
 	struct intel_engine_cs *ring = req->ring;
 	int ret;
 
-	ret = intel_ring_begin(ring, 4);
+	ret = intel_ring_begin(req, 4);
 	if (ret)
 		return ret;
 
@@ -371,7 +370,7 @@ gen7_render_ring_flush(struct drm_i915_gem_request *req,
 		gen7_render_ring_cs_stall_wa(req);
 	}
 
-	ret = intel_ring_begin(ring, 4);
+	ret = intel_ring_begin(req, 4);
 	if (ret)
 		return ret;
 
@@ -391,7 +390,7 @@ gen8_emit_pipe_control(struct drm_i915_gem_request *req,
 	struct intel_engine_cs *ring = req->ring;
 	int ret;
 
-	ret = intel_ring_begin(ring, 6);
+	ret = intel_ring_begin(req, 6);
 	if (ret)
 		return ret;
 
@@ -726,7 +725,7 @@ static int intel_ring_workarounds_emit(struct drm_i915_gem_request *req)
 	if (ret)
 		return ret;
 
-	ret = intel_ring_begin(ring, (w->count * 2 + 2));
+	ret = intel_ring_begin(req, (w->count * 2 + 2));
 	if (ret)
 		return ret;
 
@@ -1185,7 +1184,7 @@ static int gen8_rcs_signal(struct drm_i915_gem_request *signaller_req,
 	num_dwords += (num_rings-1) * MBOX_UPDATE_DWORDS;
 #undef MBOX_UPDATE_DWORDS
 
-	ret = intel_ring_begin(signaller, num_dwords);
+	ret = intel_ring_begin(signaller_req, num_dwords);
 	if (ret)
 		return ret;
 
@@ -1226,7 +1225,7 @@ static int gen8_xcs_signal(struct drm_i915_gem_request *signaller_req,
 	num_dwords += (num_rings-1) * MBOX_UPDATE_DWORDS;
 #undef MBOX_UPDATE_DWORDS
 
-	ret = intel_ring_begin(signaller, num_dwords);
+	ret = intel_ring_begin(signaller_req, num_dwords);
 	if (ret)
 		return ret;
 
@@ -1265,7 +1264,7 @@ static int gen6_signal(struct drm_i915_gem_request *signaller_req,
 	num_dwords += round_up((num_rings-1) * MBOX_UPDATE_DWORDS, 2);
 #undef MBOX_UPDATE_DWORDS
 
-	ret = intel_ring_begin(signaller, num_dwords);
+	ret = intel_ring_begin(signaller_req, num_dwords);
 	if (ret)
 		return ret;
 
@@ -1303,7 +1302,7 @@ gen6_add_request(struct drm_i915_gem_request *req)
 	if (ring->semaphore.signal)
 		ret = ring->semaphore.signal(req, 4);
 	else
-		ret = intel_ring_begin(ring, 4);
+		ret = intel_ring_begin(req, 4);
 
 	if (ret)
 		return ret;
@@ -1341,7 +1340,7 @@ gen8_ring_sync(struct drm_i915_gem_request *waiter_req,
 	struct drm_i915_private *dev_priv = waiter->dev->dev_private;
 	int ret;
 
-	ret = intel_ring_begin(waiter, 4);
+	ret = intel_ring_begin(waiter_req, 4);
 	if (ret)
 		return ret;
 
@@ -1378,7 +1377,7 @@ gen6_ring_sync(struct drm_i915_gem_request *waiter_req,
 
 	WARN_ON(wait_mbox == MI_SEMAPHORE_SYNC_INVALID);
 
-	ret = intel_ring_begin(waiter, 4);
+	ret = intel_ring_begin(waiter_req, 4);
 	if (ret)
 		return ret;
 
@@ -1423,7 +1422,7 @@ pc_render_add_request(struct drm_i915_gem_request *req)
 	 * incoherence by flushing the 6 PIPE_NOTIFY buffers out to
 	 * memory before requesting an interrupt.
 	 */
-	ret = intel_ring_begin(ring, 32);
+	ret = intel_ring_begin(req, 32);
 	if (ret)
 		return ret;
 
@@ -1608,7 +1607,7 @@ bsd_ring_flush(struct drm_i915_gem_request *req,
 	struct intel_engine_cs *ring = req->ring;
 	int ret;
 
-	ret = intel_ring_begin(ring, 2);
+	ret = intel_ring_begin(req, 2);
 	if (ret)
 		return ret;
 
@@ -1624,7 +1623,7 @@ i9xx_add_request(struct drm_i915_gem_request *req)
 	struct intel_engine_cs *ring = req->ring;
 	int ret;
 
-	ret = intel_ring_begin(ring, 4);
+	ret = intel_ring_begin(req, 4);
 	if (ret)
 		return ret;
 
@@ -1769,7 +1768,7 @@ i965_dispatch_execbuffer(struct drm_i915_gem_request *req,
 	struct intel_engine_cs *ring = req->ring;
 	int ret;
 
-	ret = intel_ring_begin(ring, 2);
+	ret = intel_ring_begin(req, 2);
 	if (ret)
 		return ret;
 
@@ -1797,7 +1796,7 @@ i830_dispatch_execbuffer(struct drm_i915_gem_request *req,
 	u32 cs_offset = ring->scratch.gtt_offset;
 	int ret;
 
-	ret = intel_ring_begin(ring, 6);
+	ret = intel_ring_begin(req, 6);
 	if (ret)
 		return ret;
 
@@ -1814,7 +1813,7 @@ i830_dispatch_execbuffer(struct drm_i915_gem_request *req,
 	if (len > I830_BATCH_LIMIT)
 		return -ENOSPC;
 
-	ret = intel_ring_begin(ring, 6 + 2);
+	ret = intel_ring_begin(req, 6 + 2);
 	if (ret)
 		return ret;
 
@@ -1837,7 +1836,7 @@ i830_dispatch_execbuffer(struct drm_i915_gem_request *req,
 		offset = cs_offset;
 	}
 
-	ret = intel_ring_begin(ring, 4);
+	ret = intel_ring_begin(req, 4);
 	if (ret)
 		return ret;
 
@@ -1859,7 +1858,7 @@ i915_dispatch_execbuffer(struct drm_i915_gem_request *req,
 	struct intel_engine_cs *ring = req->ring;
 	int ret;
 
-	ret = intel_ring_begin(ring, 2);
+	ret = intel_ring_begin(req, 2);
 	if (ret)
 		return ret;
 
@@ -2285,13 +2284,17 @@ static int __intel_ring_prepare(struct intel_engine_cs *ring, int bytes)
 	return 0;
 }
 
-int intel_ring_begin(struct intel_engine_cs *ring,
+int intel_ring_begin(struct drm_i915_gem_request *req,
 		     int num_dwords)
 {
-	struct drm_i915_gem_request *req;
-	struct drm_i915_private *dev_priv = ring->dev->dev_private;
+	struct intel_engine_cs *ring;
+	struct drm_i915_private *dev_priv;
 	int ret;
 
+	WARN_ON(req == NULL);
+	ring = req->ring;
+	dev_priv = ring->dev->dev_private;
+
 	ret = i915_gem_check_wedge(&dev_priv->gpu_error,
 				   dev_priv->mm.interruptible);
 	if (ret)
@@ -2301,11 +2304,6 @@ int intel_ring_begin(struct intel_engine_cs *ring,
 	if (ret)
 		return ret;
 
-	/* Preallocate the olr before touching the ring */
-	ret = i915_gem_request_alloc(ring, ring->default_context, &req);
-	if (ret)
-		return ret;
-
 	ring->buffer->space -= num_dwords * sizeof(uint32_t);
 	return 0;
 }
@@ -2321,7 +2319,7 @@ int intel_ring_cacheline_align(struct drm_i915_gem_request *req)
 		return 0;
 
 	num_dwords = CACHELINE_BYTES / sizeof(uint32_t) - num_dwords;
-	ret = intel_ring_begin(ring, num_dwords);
+	ret = intel_ring_begin(req, num_dwords);
 	if (ret)
 		return ret;
 
@@ -2391,7 +2389,7 @@ static int gen6_bsd_ring_flush(struct drm_i915_gem_request *req,
 	uint32_t cmd;
 	int ret;
 
-	ret = intel_ring_begin(ring, 4);
+	ret = intel_ring_begin(req, 4);
 	if (ret)
 		return ret;
 
@@ -2438,7 +2436,7 @@ gen8_ring_dispatch_execbuffer(struct drm_i915_gem_request *req,
 		!(dispatch_flags & I915_DISPATCH_SECURE);
 	int ret;
 
-	ret = intel_ring_begin(ring, 4);
+	ret = intel_ring_begin(req, 4);
 	if (ret)
 		return ret;
 
@@ -2460,7 +2458,7 @@ hsw_ring_dispatch_execbuffer(struct drm_i915_gem_request *req,
 	struct intel_engine_cs *ring = req->ring;
 	int ret;
 
-	ret = intel_ring_begin(ring, 2);
+	ret = intel_ring_begin(req, 2);
 	if (ret)
 		return ret;
 
@@ -2483,7 +2481,7 @@ gen6_ring_dispatch_execbuffer(struct drm_i915_gem_request *req,
 	struct intel_engine_cs *ring = req->ring;
 	int ret;
 
-	ret = intel_ring_begin(ring, 2);
+	ret = intel_ring_begin(req, 2);
 	if (ret)
 		return ret;
 
@@ -2508,7 +2506,7 @@ static int gen6_ring_flush(struct drm_i915_gem_request *req,
 	uint32_t cmd;
 	int ret;
 
-	ret = intel_ring_begin(ring, 4);
+	ret = intel_ring_begin(req, 4);
 	if (ret)
 		return ret;
 