author	Chris Wilson <chris@chris-wilson.co.uk>	2010-12-04 06:30:53 -0500
committer	Chris Wilson <chris@chris-wilson.co.uk>	2010-12-04 19:37:38 -0500
commit	1ec14ad3132702694f2e1a90b30641cf111183b9 (patch)
tree	98ca9ae91f14ff5d8feed306941ea2c46479e71a /drivers/gpu/drm/i915/intel_ringbuffer.c
parent	340479aac697bc73e225c122a9753d4964eeda3f (diff)
drm/i915: Implement GPU semaphores for inter-ring synchronisation on SNB
The bulk of the change is to convert the growing list of rings into an
array so that the relationship between the rings and the semaphore sync
registers can be easily computed.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Diffstat (limited to 'drivers/gpu/drm/i915/intel_ringbuffer.c')
-rw-r--r--	drivers/gpu/drm/i915/intel_ringbuffer.c	415
1 file changed, 259 insertions(+), 156 deletions(-)
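As background for the diff below: once the rings live in a dev_priv->ring[] array, the mailbox each ring must write to signal its two peers falls out of simple modular arithmetic. The following stand-alone C sketch (hypothetical names and indices; it only mirrors the arithmetic of update_semaphore() further down, with the render/video/blitter rings assumed at indices 0/1/2) prints that mapping:

#include <stdio.h>

/* Stand-in indices for the new dev_priv->ring[] array: render, video, blit. */
enum { RCS, VCS, BCS, NUM_RINGS };

static const char *name[NUM_RINGS] = { "cs", "vcs", "bcs" };

int main(void)
{
	int self, i;

	/* Same arithmetic as update_semaphore(): mailbox slot i of ring 'self'
	 * writes the sync register RING_SYNC_i of ring (self + 2 - i) % 3. */
	for (self = 0; self < NUM_RINGS; self++)
		for (i = 0; i < 2; i++) {
			int id = (self + 2 - i) % NUM_RINGS;
			printf("%s: mailbox %d -> RING_SYNC_%d(%s)\n",
			       name[self], i, i, name[id]);
		}
	return 0;
}

Its output reproduces the cs/vcs/bcs table given in the comment inside update_semaphore().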
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 21871b0766e..f71db0cf490 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -203,6 +203,7 @@ static int init_ring_common(struct intel_ring_buffer *ring)
 		if (ring->space < 0)
 			ring->space += ring->size;
 	}
+
 	return 0;
 }
 
@@ -281,17 +282,18 @@ cleanup_pipe_control(struct intel_ring_buffer *ring)
 static int init_render_ring(struct intel_ring_buffer *ring)
 {
 	struct drm_device *dev = ring->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
 	int ret = init_ring_common(ring);
 
 	if (INTEL_INFO(dev)->gen > 3) {
-		drm_i915_private_t *dev_priv = dev->dev_private;
 		int mode = VS_TIMER_DISPATCH << 16 | VS_TIMER_DISPATCH;
 		if (IS_GEN6(dev))
 			mode |= MI_FLUSH_ENABLE << 16 | MI_FLUSH_ENABLE;
 		I915_WRITE(MI_MODE, mode);
 	}
 
-	if (HAS_PIPE_CONTROL(dev)) {
+	if (INTEL_INFO(dev)->gen >= 6) {
+	} else if (HAS_PIPE_CONTROL(dev)) {
 		ret = init_pipe_control(ring);
 		if (ret)
 			return ret;
@@ -308,6 +310,80 @@ static void render_ring_cleanup(struct intel_ring_buffer *ring)
 	cleanup_pipe_control(ring);
 }
 
+static void
+update_semaphore(struct intel_ring_buffer *ring, int i, u32 seqno)
+{
+	struct drm_device *dev = ring->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	int id;
+
+	/*
+	 * cs -> 1 = vcs, 0 = bcs
+	 * vcs -> 1 = bcs, 0 = cs,
+	 * bcs -> 1 = cs, 0 = vcs.
+	 */
+	id = ring - dev_priv->ring;
+	id += 2 - i;
+	id %= 3;
+
+	intel_ring_emit(ring,
+			MI_SEMAPHORE_MBOX |
+			MI_SEMAPHORE_REGISTER |
+			MI_SEMAPHORE_UPDATE);
+	intel_ring_emit(ring, seqno);
+	intel_ring_emit(ring,
+			RING_SYNC_0(dev_priv->ring[id].mmio_base) + 4*i);
+}
+
+static int
+gen6_add_request(struct intel_ring_buffer *ring,
+		 u32 *result)
+{
+	u32 seqno;
+	int ret;
+
+	ret = intel_ring_begin(ring, 10);
+	if (ret)
+		return ret;
+
+	seqno = i915_gem_get_seqno(ring->dev);
+	update_semaphore(ring, 0, seqno);
+	update_semaphore(ring, 1, seqno);
+
+	intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
+	intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
+	intel_ring_emit(ring, seqno);
+	intel_ring_emit(ring, MI_USER_INTERRUPT);
+	intel_ring_advance(ring);
+
+	*result = seqno;
+	return 0;
+}
+
+int
+intel_ring_sync(struct intel_ring_buffer *ring,
+		struct intel_ring_buffer *to,
+		u32 seqno)
+{
+	int ret;
+
+	ret = intel_ring_begin(ring, 4);
+	if (ret)
+		return ret;
+
+	intel_ring_emit(ring,
+			MI_SEMAPHORE_MBOX |
+			MI_SEMAPHORE_REGISTER |
+			intel_ring_sync_index(ring, to) << 17 |
+			MI_SEMAPHORE_COMPARE);
+	intel_ring_emit(ring, seqno);
+	intel_ring_emit(ring, 0);
+	intel_ring_emit(ring, MI_NOOP);
+	intel_ring_advance(ring);
+
+	return 0;
+}
+
 #define PIPE_CONTROL_FLUSH(ring__, addr__) \
 do { \
 	intel_ring_emit(ring__, GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE | \
@@ -317,131 +393,128 @@ do { \
 	intel_ring_emit(ring__, 0); \
 } while (0)
 
-/**
- * Creates a new sequence number, emitting a write of it to the status page
- * plus an interrupt, which will trigger i915_user_interrupt_handler.
- *
- * Must be called with struct_lock held.
- *
- * Returned sequence numbers are nonzero on success.
- */
 static int
-render_ring_add_request(struct intel_ring_buffer *ring,
+pc_render_add_request(struct intel_ring_buffer *ring,
 		      u32 *result)
 {
 	struct drm_device *dev = ring->dev;
 	u32 seqno = i915_gem_get_seqno(dev);
 	struct pipe_control *pc = ring->private;
+	u32 scratch_addr = pc->gtt_offset + 128;
 	int ret;
 
-	if (IS_GEN6(dev)) {
-		ret = intel_ring_begin(ring, 6);
-		if (ret)
-			return ret;
-
-		intel_ring_emit(ring, GFX_OP_PIPE_CONTROL | 3);
-		intel_ring_emit(ring, PIPE_CONTROL_QW_WRITE |
-				PIPE_CONTROL_WC_FLUSH | PIPE_CONTROL_IS_FLUSH |
-				PIPE_CONTROL_NOTIFY);
-		intel_ring_emit(ring, pc->gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
-		intel_ring_emit(ring, seqno);
-		intel_ring_emit(ring, 0);
-		intel_ring_emit(ring, 0);
-	} else if (HAS_PIPE_CONTROL(dev)) {
-		u32 scratch_addr = pc->gtt_offset + 128;
+	/*
+	 * Workaround qword write incoherence by flushing the
+	 * PIPE_NOTIFY buffers out to memory before requesting
+	 * an interrupt.
+	 */
+	ret = intel_ring_begin(ring, 32);
+	if (ret)
+		return ret;
 
-		/*
-		 * Workaround qword write incoherence by flushing the
-		 * PIPE_NOTIFY buffers out to memory before requesting
-		 * an interrupt.
-		 */
-		ret = intel_ring_begin(ring, 32);
-		if (ret)
-			return ret;
+	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE |
+			PIPE_CONTROL_WC_FLUSH | PIPE_CONTROL_TC_FLUSH);
+	intel_ring_emit(ring, pc->gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
+	intel_ring_emit(ring, seqno);
+	intel_ring_emit(ring, 0);
+	PIPE_CONTROL_FLUSH(ring, scratch_addr);
+	scratch_addr += 128; /* write to separate cachelines */
+	PIPE_CONTROL_FLUSH(ring, scratch_addr);
+	scratch_addr += 128;
+	PIPE_CONTROL_FLUSH(ring, scratch_addr);
+	scratch_addr += 128;
+	PIPE_CONTROL_FLUSH(ring, scratch_addr);
+	scratch_addr += 128;
+	PIPE_CONTROL_FLUSH(ring, scratch_addr);
+	scratch_addr += 128;
+	PIPE_CONTROL_FLUSH(ring, scratch_addr);
+	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE |
+			PIPE_CONTROL_WC_FLUSH | PIPE_CONTROL_TC_FLUSH |
+			PIPE_CONTROL_NOTIFY);
+	intel_ring_emit(ring, pc->gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
+	intel_ring_emit(ring, seqno);
+	intel_ring_emit(ring, 0);
+	intel_ring_advance(ring);
 
-		intel_ring_emit(ring, GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE |
-				PIPE_CONTROL_WC_FLUSH | PIPE_CONTROL_TC_FLUSH);
-		intel_ring_emit(ring, pc->gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
-		intel_ring_emit(ring, seqno);
-		intel_ring_emit(ring, 0);
-		PIPE_CONTROL_FLUSH(ring, scratch_addr);
-		scratch_addr += 128; /* write to separate cachelines */
-		PIPE_CONTROL_FLUSH(ring, scratch_addr);
-		scratch_addr += 128;
-		PIPE_CONTROL_FLUSH(ring, scratch_addr);
-		scratch_addr += 128;
-		PIPE_CONTROL_FLUSH(ring, scratch_addr);
-		scratch_addr += 128;
-		PIPE_CONTROL_FLUSH(ring, scratch_addr);
-		scratch_addr += 128;
-		PIPE_CONTROL_FLUSH(ring, scratch_addr);
-		intel_ring_emit(ring, GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE |
-				PIPE_CONTROL_WC_FLUSH | PIPE_CONTROL_TC_FLUSH |
-				PIPE_CONTROL_NOTIFY);
-		intel_ring_emit(ring, pc->gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
-		intel_ring_emit(ring, seqno);
-		intel_ring_emit(ring, 0);
-	} else {
-		ret = intel_ring_begin(ring, 4);
-		if (ret)
-			return ret;
+	*result = seqno;
+	return 0;
+}
 
-		intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
-		intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
-		intel_ring_emit(ring, seqno);
+static int
+render_ring_add_request(struct intel_ring_buffer *ring,
+			u32 *result)
+{
+	struct drm_device *dev = ring->dev;
+	u32 seqno = i915_gem_get_seqno(dev);
+	int ret;
 
-		intel_ring_emit(ring, MI_USER_INTERRUPT);
-	}
+	ret = intel_ring_begin(ring, 4);
+	if (ret)
+		return ret;
 
+	intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
+	intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
+	intel_ring_emit(ring, seqno);
+	intel_ring_emit(ring, MI_USER_INTERRUPT);
 	intel_ring_advance(ring);
+
 	*result = seqno;
 	return 0;
 }
 
 static u32
-render_ring_get_seqno(struct intel_ring_buffer *ring)
+ring_get_seqno(struct intel_ring_buffer *ring)
 {
-	struct drm_device *dev = ring->dev;
-	if (HAS_PIPE_CONTROL(dev)) {
-		struct pipe_control *pc = ring->private;
-		return pc->cpu_page[0];
-	} else
-		return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
+	return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
+}
+
+static u32
+pc_render_get_seqno(struct intel_ring_buffer *ring)
+{
+	struct pipe_control *pc = ring->private;
+	return pc->cpu_page[0];
 }
 
 static void
-render_ring_get_user_irq(struct intel_ring_buffer *ring)
+render_ring_get_irq(struct intel_ring_buffer *ring)
 {
 	struct drm_device *dev = ring->dev;
-	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
-	unsigned long irqflags;
 
-	spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
-	if (dev->irq_enabled && (++ring->user_irq_refcount == 1)) {
+	if (dev->irq_enabled && ++ring->irq_refcount == 1) {
+		drm_i915_private_t *dev_priv = dev->dev_private;
+		unsigned long irqflags;
+
+		spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
+
 		if (HAS_PCH_SPLIT(dev))
-			ironlake_enable_graphics_irq(dev_priv, GT_PIPE_NOTIFY);
+			ironlake_enable_graphics_irq(dev_priv,
+						     GT_PIPE_NOTIFY | GT_USER_INTERRUPT);
 		else
 			i915_enable_irq(dev_priv, I915_USER_INTERRUPT);
+
+		spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
 	}
-	spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags);
 }
 
 static void
-render_ring_put_user_irq(struct intel_ring_buffer *ring)
+render_ring_put_irq(struct intel_ring_buffer *ring)
 {
 	struct drm_device *dev = ring->dev;
-	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
-	unsigned long irqflags;
 
-	spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
-	BUG_ON(dev->irq_enabled && ring->user_irq_refcount <= 0);
-	if (dev->irq_enabled && (--ring->user_irq_refcount == 0)) {
+	BUG_ON(dev->irq_enabled && ring->irq_refcount == 0);
+	if (dev->irq_enabled && --ring->irq_refcount == 0) {
+		drm_i915_private_t *dev_priv = dev->dev_private;
+		unsigned long irqflags;
+
+		spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
 		if (HAS_PCH_SPLIT(dev))
-			ironlake_disable_graphics_irq(dev_priv, GT_PIPE_NOTIFY);
+			ironlake_disable_graphics_irq(dev_priv,
+						      GT_USER_INTERRUPT |
+						      GT_PIPE_NOTIFY);
 		else
 			i915_disable_irq(dev_priv, I915_USER_INTERRUPT);
+		spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
 	}
-	spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags);
 }
 
 void intel_ring_setup_status_page(struct intel_ring_buffer *ring)
@@ -459,6 +532,9 @@ bsd_ring_flush(struct intel_ring_buffer *ring,
 		u32 invalidate_domains,
 		u32 flush_domains)
 {
+	if ((flush_domains & I915_GEM_DOMAIN_RENDER) == 0)
+		return;
+
 	if (intel_ring_begin(ring, 2) == 0) {
 		intel_ring_emit(ring, MI_FLUSH);
 		intel_ring_emit(ring, MI_NOOP);
@@ -491,20 +567,45 @@ ring_add_request(struct intel_ring_buffer *ring,
 }
 
 static void
-bsd_ring_get_user_irq(struct intel_ring_buffer *ring)
+ring_get_irq(struct intel_ring_buffer *ring, u32 flag)
 {
-	/* do nothing */
+	struct drm_device *dev = ring->dev;
+
+	if (dev->irq_enabled && ++ring->irq_refcount == 1) {
+		drm_i915_private_t *dev_priv = dev->dev_private;
+		unsigned long irqflags;
+
+		spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
+		ironlake_enable_graphics_irq(dev_priv, flag);
+		spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+	}
 }
+
 static void
-bsd_ring_put_user_irq(struct intel_ring_buffer *ring)
+ring_put_irq(struct intel_ring_buffer *ring, u32 flag)
 {
-	/* do nothing */
+	struct drm_device *dev = ring->dev;
+
+	if (dev->irq_enabled && --ring->irq_refcount == 0) {
+		drm_i915_private_t *dev_priv = dev->dev_private;
+		unsigned long irqflags;
+
+		spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
+		ironlake_disable_graphics_irq(dev_priv, flag);
+		spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+	}
 }
 
-static u32
-ring_status_page_get_seqno(struct intel_ring_buffer *ring)
+
+static void
+bsd_ring_get_irq(struct intel_ring_buffer *ring)
 {
-	return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
+	ring_get_irq(ring, GT_BSD_USER_INTERRUPT);
+}
+static void
+bsd_ring_put_irq(struct intel_ring_buffer *ring)
+{
+	ring_put_irq(ring, GT_BSD_USER_INTERRUPT);
 }
 
 static int
@@ -817,9 +918,9 @@ static const struct intel_ring_buffer render_ring = {
 	.write_tail = ring_write_tail,
 	.flush = render_ring_flush,
 	.add_request = render_ring_add_request,
-	.get_seqno = render_ring_get_seqno,
-	.user_irq_get = render_ring_get_user_irq,
-	.user_irq_put = render_ring_put_user_irq,
+	.get_seqno = ring_get_seqno,
+	.irq_get = render_ring_get_irq,
+	.irq_put = render_ring_put_irq,
 	.dispatch_execbuffer = render_ring_dispatch_execbuffer,
 	.cleanup = render_ring_cleanup,
 };
@@ -835,9 +936,9 @@ static const struct intel_ring_buffer bsd_ring = {
 	.write_tail = ring_write_tail,
 	.flush = bsd_ring_flush,
 	.add_request = ring_add_request,
-	.get_seqno = ring_status_page_get_seqno,
-	.user_irq_get = bsd_ring_get_user_irq,
-	.user_irq_put = bsd_ring_put_user_irq,
+	.get_seqno = ring_get_seqno,
+	.irq_get = bsd_ring_get_irq,
+	.irq_put = bsd_ring_put_irq,
 	.dispatch_execbuffer = ring_dispatch_execbuffer,
 };
 
@@ -868,6 +969,9 @@ static void gen6_ring_flush(struct intel_ring_buffer *ring,
 			    u32 invalidate_domains,
 			    u32 flush_domains)
 {
+	if ((flush_domains & I915_GEM_DOMAIN_RENDER) == 0)
+		return;
+
 	if (intel_ring_begin(ring, 4) == 0) {
 		intel_ring_emit(ring, MI_FLUSH_DW);
 		intel_ring_emit(ring, 0);
@@ -895,33 +999,46 @@ gen6_ring_dispatch_execbuffer(struct intel_ring_buffer *ring,
 	return 0;
 }
 
+static void
+gen6_bsd_ring_get_irq(struct intel_ring_buffer *ring)
+{
+	ring_get_irq(ring, GT_GEN6_BSD_USER_INTERRUPT);
+}
+
+static void
+gen6_bsd_ring_put_irq(struct intel_ring_buffer *ring)
+{
+	ring_put_irq(ring, GT_GEN6_BSD_USER_INTERRUPT);
+}
+
 /* ring buffer for Video Codec for Gen6+ */
 static const struct intel_ring_buffer gen6_bsd_ring = {
 	.name = "gen6 bsd ring",
 	.id = RING_BSD,
 	.mmio_base = GEN6_BSD_RING_BASE,
 	.size = 32 * PAGE_SIZE,
 	.init = init_ring_common,
 	.write_tail = gen6_bsd_ring_write_tail,
 	.flush = gen6_ring_flush,
-	.add_request = ring_add_request,
-	.get_seqno = ring_status_page_get_seqno,
-	.user_irq_get = bsd_ring_get_user_irq,
-	.user_irq_put = bsd_ring_put_user_irq,
+	.add_request = gen6_add_request,
+	.get_seqno = ring_get_seqno,
+	.irq_get = gen6_bsd_ring_get_irq,
+	.irq_put = gen6_bsd_ring_put_irq,
 	.dispatch_execbuffer = gen6_ring_dispatch_execbuffer,
 };
 
 /* Blitter support (SandyBridge+) */
 
 static void
-blt_ring_get_user_irq(struct intel_ring_buffer *ring)
+blt_ring_get_irq(struct intel_ring_buffer *ring)
 {
-	/* do nothing */
+	ring_get_irq(ring, GT_BLT_USER_INTERRUPT);
 }
+
 static void
-blt_ring_put_user_irq(struct intel_ring_buffer *ring)
+blt_ring_put_irq(struct intel_ring_buffer *ring)
 {
-	/* do nothing */
+	ring_put_irq(ring, GT_BLT_USER_INTERRUPT);
 }
 
 
@@ -994,6 +1111,9 @@ static void blt_ring_flush(struct intel_ring_buffer *ring,
 			   u32 invalidate_domains,
 			   u32 flush_domains)
 {
+	if ((flush_domains & I915_GEM_DOMAIN_RENDER) == 0)
+		return;
+
 	if (blt_ring_begin(ring, 4) == 0) {
 		intel_ring_emit(ring, MI_FLUSH_DW);
 		intel_ring_emit(ring, 0);
@@ -1003,30 +1123,6 @@ static void blt_ring_flush(struct intel_ring_buffer *ring,
 	}
 }
 
-static int
-blt_ring_add_request(struct intel_ring_buffer *ring,
-		     u32 *result)
-{
-	u32 seqno;
-	int ret;
-
-	ret = blt_ring_begin(ring, 4);
-	if (ret)
-		return ret;
-
-	seqno = i915_gem_get_seqno(ring->dev);
-
-	intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
-	intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
-	intel_ring_emit(ring, seqno);
-	intel_ring_emit(ring, MI_USER_INTERRUPT);
-	intel_ring_advance(ring);
-
-	DRM_DEBUG_DRIVER("%s %d\n", ring->name, seqno);
-	*result = seqno;
-	return 0;
-}
-
 static void blt_ring_cleanup(struct intel_ring_buffer *ring)
 {
 	if (!ring->private)
@@ -1045,10 +1141,10 @@ static const struct intel_ring_buffer gen6_blt_ring = {
 	.init = blt_ring_init,
 	.write_tail = ring_write_tail,
 	.flush = blt_ring_flush,
-	.add_request = blt_ring_add_request,
-	.get_seqno = ring_status_page_get_seqno,
-	.user_irq_get = blt_ring_get_user_irq,
-	.user_irq_put = blt_ring_put_user_irq,
+	.add_request = gen6_add_request,
+	.get_seqno = ring_get_seqno,
+	.irq_get = blt_ring_get_irq,
+	.irq_put = blt_ring_put_irq,
 	.dispatch_execbuffer = gen6_ring_dispatch_execbuffer,
 	.cleanup = blt_ring_cleanup,
 };
@@ -1056,36 +1152,43 @@ static const struct intel_ring_buffer gen6_blt_ring = {
 int intel_init_render_ring_buffer(struct drm_device *dev)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
+	struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
 
-	dev_priv->render_ring = render_ring;
+	*ring = render_ring;
+	if (INTEL_INFO(dev)->gen >= 6) {
+		ring->add_request = gen6_add_request;
+	} else if (HAS_PIPE_CONTROL(dev)) {
+		ring->add_request = pc_render_add_request;
+		ring->get_seqno = pc_render_get_seqno;
+	}
 
 	if (!I915_NEED_GFX_HWS(dev)) {
-		dev_priv->render_ring.status_page.page_addr
-			= dev_priv->status_page_dmah->vaddr;
-		memset(dev_priv->render_ring.status_page.page_addr,
-				0, PAGE_SIZE);
+		ring->status_page.page_addr = dev_priv->status_page_dmah->vaddr;
+		memset(ring->status_page.page_addr, 0, PAGE_SIZE);
 	}
 
-	return intel_init_ring_buffer(dev, &dev_priv->render_ring);
+	return intel_init_ring_buffer(dev, ring);
 }
 
 int intel_init_bsd_ring_buffer(struct drm_device *dev)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
+	struct intel_ring_buffer *ring = &dev_priv->ring[VCS];
 
 	if (IS_GEN6(dev))
-		dev_priv->bsd_ring = gen6_bsd_ring;
+		*ring = gen6_bsd_ring;
 	else
-		dev_priv->bsd_ring = bsd_ring;
+		*ring = bsd_ring;
 
-	return intel_init_ring_buffer(dev, &dev_priv->bsd_ring);
+	return intel_init_ring_buffer(dev, ring);
 }
 
 int intel_init_blt_ring_buffer(struct drm_device *dev)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
+	struct intel_ring_buffer *ring = &dev_priv->ring[BCS];
 
-	dev_priv->blt_ring = gen6_blt_ring;
+	*ring = gen6_blt_ring;
 
-	return intel_init_ring_buffer(dev, &dev_priv->blt_ring);
+	return intel_init_ring_buffer(dev, ring);
 }
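For completeness, here is one plausible caller of the new intel_ring_sync() entry point, sketched on the assumption that the first argument is the ring that waits (it is the ring on which the MI_SEMAPHORE_COMPARE is emitted). The real call sites added alongside this patch live outside this file; the helper below is purely illustrative.

/* Hypothetical helper, not part of this patch: make the blitter ring stall
 * until the render ring has passed 'seqno'.  Locking and error handling of
 * the surrounding GEM code are omitted. */
static int wait_for_render_seqno(struct drm_device *dev, u32 seqno)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_ring_buffer *waiter = &dev_priv->ring[BCS];
	struct intel_ring_buffer *signaller = &dev_priv->ring[RCS];

	/* Emits MI_SEMAPHORE_MBOX | MI_SEMAPHORE_COMPARE on the waiter, which
	 * holds it back until the sync register shared with the signaller
	 * reaches seqno (written there by gen6_add_request() on the signaller). */
	return intel_ring_sync(waiter, signaller, seqno);
}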