Diffstat (limited to 'drivers/gpu/drm/i915/intel_ringbuffer.c')

 drivers/gpu/drm/i915/intel_ringbuffer.c | 216 ++++++++-----------------
 1 file changed, 68 insertions(+), 148 deletions(-)
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index f457146ff6a4..1d01b51ff058 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -589,14 +589,10 @@ static int init_ring_common(struct intel_engine_cs *ring)
 		goto out;
 	}
 
-	if (!drm_core_check_feature(ring->dev, DRIVER_MODESET))
-		i915_kernel_lost_context(ring->dev);
-	else {
-		ringbuf->head = I915_READ_HEAD(ring);
-		ringbuf->tail = I915_READ_TAIL(ring) & TAIL_ADDR;
-		ringbuf->space = intel_ring_space(ringbuf);
-		ringbuf->last_retired_head = -1;
-	}
+	ringbuf->head = I915_READ_HEAD(ring);
+	ringbuf->tail = I915_READ_TAIL(ring) & TAIL_ADDR;
+	ringbuf->space = intel_ring_space(ringbuf);
+	ringbuf->last_retired_head = -1;
 
 	memset(&ring->hangcheck, 0, sizeof(ring->hangcheck));
 
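With the DRIVER_MODESET check and the i915_kernel_lost_context() fallback removed, init_ring_common() always rebuilds the software ring state from the hardware HEAD/TAIL registers. As a reference for the restored lines, a minimal sketch of the free-space computation behind intel_ring_space(), assuming the driver's usual convention of keeping a small reserved gap so a full ring is never indistinguishable from an empty one (the function name and the 64-byte constant are illustrative, not quoted from this file):

	static int ring_space_sketch(int head, int tail, int size)
	{
		/* keep a reserved gap so head == tail always means "empty" */
		int space = head - (tail + 64 /* reserved bytes */);

		if (space < 0)
			space += size;	/* tail has wrapped past head */
		return space;
	}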
@@ -665,7 +661,8 @@ err:
 	return ret;
 }
 
-static int intel_ring_workarounds_emit(struct intel_engine_cs *ring)
+static int intel_ring_workarounds_emit(struct intel_engine_cs *ring,
+				       struct intel_context *ctx)
 {
 	int ret, i;
 	struct drm_device *dev = ring->dev;
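intel_ring_workarounds_emit() now takes the intel_context it emits into instead of operating on implicit ring state, presumably groundwork for replaying the recorded workaround list per context. The implied call-site shape, as an assumption rather than code from this diff:

	/* hypothetical caller: replay recorded workarounds into ctx */
	ret = intel_ring_workarounds_emit(ring, ctx);
	if (ret)
		return ret;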
@@ -788,25 +785,25 @@ static int chv_init_workarounds(struct intel_engine_cs *ring)
 	struct drm_i915_private *dev_priv = dev->dev_private;
 
 	/* WaDisablePartialInstShootdown:chv */
-	WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN,
-			  PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE);
-
 	/* WaDisableThreadStallDopClockGating:chv */
 	WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN,
-			  STALL_DOP_GATING_DISABLE);
-
-	/* WaDisableDopClockGating:chv (pre-production hw) */
-	WA_SET_BIT_MASKED(GEN7_ROW_CHICKEN2,
-			  DOP_CLOCK_GATING_DISABLE);
+			  PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE |
+			  STALL_DOP_GATING_DISABLE);
 
-	/* WaDisableSamplerPowerBypass:chv (pre-production hw) */
-	WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN3,
-			  GEN8_SAMPLER_POWER_BYPASS_DIS);
+	/* Use Force Non-Coherent whenever executing a 3D context. This is a
+	 * workaround for a possible hang in the unlikely event a TLB
+	 * invalidation occurs during a PSD flush.
+	 */
+	/* WaForceEnableNonCoherent:chv */
+	/* WaHdcDisableFetchWhenMasked:chv */
+	WA_SET_BIT_MASKED(HDC_CHICKEN0,
+			  HDC_FORCE_NON_COHERENT |
+			  HDC_DONOT_FETCH_MEM_WHEN_MASKED);
 
 	return 0;
 }
 
-static int init_workarounds_ring(struct intel_engine_cs *ring)
+int init_workarounds_ring(struct intel_engine_cs *ring)
 {
 	struct drm_device *dev = ring->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
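Two things happen in this hunk. First, the shootdown and thread-stall workarounds are folded into a single WA_SET_BIT_MASKED() entry; that works because GEN8_ROW_CHICKEN is a masked register, where by i915 convention bits 31:16 of the written value act as a write-enable mask for bits 15:0, so several chicken bits can be set in one write. A sketch of that convention (macro name illustrative; the driver has its own equivalent):

	/* set bits `b` in a masked register: high half enables, low half sets */
	#define MASKED_BIT_ENABLE_SKETCH(b)	(((b) << 16) | (b))

	/*
	 * One register write now covers both workaround bits:
	 * MASKED_BIT_ENABLE_SKETCH(PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE |
	 *			    STALL_DOP_GATING_DISABLE)
	 */

Second, the two pre-production-only workarounds (WaDisableDopClockGating, WaDisableSamplerPowerBypass) are dropped in favor of the HDC force-non-coherent settings, and init_workarounds_ring() loses its `static`, presumably so the execlists/logical-ring code can call it as well.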
@@ -1721,13 +1718,42 @@ static int init_phys_status_page(struct intel_engine_cs *ring)
 	return 0;
 }
 
-void intel_destroy_ringbuffer_obj(struct intel_ringbuffer *ringbuf)
+void intel_unpin_ringbuffer_obj(struct intel_ringbuffer *ringbuf)
 {
-	if (!ringbuf->obj)
-		return;
-
 	iounmap(ringbuf->virtual_start);
+	ringbuf->virtual_start = NULL;
 	i915_gem_object_ggtt_unpin(ringbuf->obj);
+}
+
+int intel_pin_and_map_ringbuffer_obj(struct drm_device *dev,
+				     struct intel_ringbuffer *ringbuf)
+{
+	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_gem_object *obj = ringbuf->obj;
+	int ret;
+
+	ret = i915_gem_obj_ggtt_pin(obj, PAGE_SIZE, PIN_MAPPABLE);
+	if (ret)
+		return ret;
+
+	ret = i915_gem_object_set_to_gtt_domain(obj, true);
+	if (ret) {
+		i915_gem_object_ggtt_unpin(obj);
+		return ret;
+	}
+
+	ringbuf->virtual_start = ioremap_wc(dev_priv->gtt.mappable_base +
+			i915_gem_obj_ggtt_offset(obj), ringbuf->size);
+	if (ringbuf->virtual_start == NULL) {
+		i915_gem_object_ggtt_unpin(obj);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+void intel_destroy_ringbuffer_obj(struct intel_ringbuffer *ringbuf)
+{
 	drm_gem_object_unreference(&ringbuf->obj->base);
 	ringbuf->obj = NULL;
 }
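The old all-in-one destroy is split into a staged lifecycle: intel_alloc_ringbuffer_obj() owns the GEM object, intel_pin_and_map_ringbuffer_obj() makes it GPU-reachable (GGTT pin) and CPU-visible (ioremap_wc() through the mappable aperture), and intel_unpin_ringbuffer_obj() undoes only the latter, so a ringbuffer can now be unpinned without losing its backing object. The intended pairing, sketched using only functions from this diff:

	ret = intel_alloc_ringbuffer_obj(dev, ringbuf);	/* create GEM object */
	if (ret == 0)
		ret = intel_pin_and_map_ringbuffer_obj(dev, ringbuf);

	/* ... ring in use ... */

	intel_unpin_ringbuffer_obj(ringbuf);	/* iounmap + GGTT unpin */
	intel_destroy_ringbuffer_obj(ringbuf);	/* drop the GEM reference */

Note that the unpin path now also clears virtual_start so a stale mapping cannot be dereferenced, and each error path in pin-and-map unwinds exactly the steps taken before it.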
@@ -1735,12 +1761,7 @@ void intel_destroy_ringbuffer_obj(struct intel_ringbuffer *ringbuf)
 int intel_alloc_ringbuffer_obj(struct drm_device *dev,
 			       struct intel_ringbuffer *ringbuf)
 {
-	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct drm_i915_gem_object *obj;
-	int ret;
-
-	if (ringbuf->obj)
-		return 0;
 
 	obj = NULL;
 	if (!HAS_LLC(dev))
@@ -1753,30 +1774,9 @@ int intel_alloc_ringbuffer_obj(struct drm_device *dev,
 	/* mark ring buffers as read-only from GPU side by default */
 	obj->gt_ro = 1;
 
-	ret = i915_gem_obj_ggtt_pin(obj, PAGE_SIZE, PIN_MAPPABLE);
-	if (ret)
-		goto err_unref;
-
-	ret = i915_gem_object_set_to_gtt_domain(obj, true);
-	if (ret)
-		goto err_unpin;
-
-	ringbuf->virtual_start =
-		ioremap_wc(dev_priv->gtt.mappable_base + i915_gem_obj_ggtt_offset(obj),
-			   ringbuf->size);
-	if (ringbuf->virtual_start == NULL) {
-		ret = -EINVAL;
-		goto err_unpin;
-	}
-
 	ringbuf->obj = obj;
-	return 0;
 
-err_unpin:
-	i915_gem_object_ggtt_unpin(obj);
-err_unref:
-	drm_gem_object_unreference(&obj->base);
-	return ret;
+	return 0;
 }
 
 static int intel_init_ring_buffer(struct drm_device *dev,
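With pinning and mapping moved into intel_pin_and_map_ringbuffer_obj(), intel_alloc_ringbuffer_obj() reduces to plain object creation: the err_unpin/err_unref unwind ladder disappears, and the old early-return guard (`if (ringbuf->obj) return 0;`) migrates to the caller as the `ringbuf->obj == NULL` check in the intel_init_ring_buffer() hunk below.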
@@ -1813,10 +1813,21 @@ static int intel_init_ring_buffer(struct drm_device *dev,
 		goto error;
 	}
 
-	ret = intel_alloc_ringbuffer_obj(dev, ringbuf);
-	if (ret) {
-		DRM_ERROR("Failed to allocate ringbuffer %s: %d\n", ring->name, ret);
-		goto error;
+	if (ringbuf->obj == NULL) {
+		ret = intel_alloc_ringbuffer_obj(dev, ringbuf);
+		if (ret) {
+			DRM_ERROR("Failed to allocate ringbuffer %s: %d\n",
+					ring->name, ret);
+			goto error;
+		}
+
+		ret = intel_pin_and_map_ringbuffer_obj(dev, ringbuf);
+		if (ret) {
+			DRM_ERROR("Failed to pin and map ringbuffer %s: %d\n",
+					ring->name, ret);
+			intel_destroy_ringbuffer_obj(ringbuf);
+			goto error;
+		}
 	}
 
 	/* Workaround an erratum on the i830 which causes a hang if
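intel_init_ring_buffer() now allocates only when ringbuf->obj is still NULL, which keeps initialization correct for ringbuffers whose backing object was created elsewhere (presumably per-context ringbuffers on the execlists path). If pinning fails right after a fresh allocation, intel_destroy_ringbuffer_obj() is called before bailing out, so the error path cannot leak a half-initialized buffer.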
@@ -1857,6 +1868,7 @@ void intel_cleanup_ring_buffer(struct intel_engine_cs *ring)
 	intel_stop_ring_buffer(ring);
 	WARN_ON(!IS_GEN2(ring->dev) && (I915_READ_MODE(ring) & MODE_IDLE) == 0);
 
+	intel_unpin_ringbuffer_obj(ringbuf);
 	intel_destroy_ringbuffer_obj(ringbuf);
 	ring->preallocated_lazy_request = NULL;
 	ring->outstanding_lazy_seqno = 0;
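Teardown mirrors the two-stage setup: unpin first (iounmap plus GGTT unpin), then destroy (drop the GEM reference). The reverse order would release the object's last reference while it is still pinned and mapped through the aperture.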
@@ -1942,13 +1954,6 @@ static int ring_wait_for_space(struct intel_engine_cs *ring, int n)
 			break;
 		}
 
-		if (!drm_core_check_feature(dev, DRIVER_MODESET) &&
-		    dev->primary->master) {
-			struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
-			if (master_priv->sarea_priv)
-				master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;
-		}
-
 		msleep(1);
 
 		if (dev_priv->mm.interruptible && signal_pending(current)) {
@@ -2439,91 +2444,6 @@ int intel_init_render_ring_buffer(struct drm_device *dev)
 	return intel_init_ring_buffer(dev, ring);
 }
 
-int intel_render_ring_init_dri(struct drm_device *dev, u64 start, u32 size)
-{
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_engine_cs *ring = &dev_priv->ring[RCS];
-	struct intel_ringbuffer *ringbuf = ring->buffer;
-	int ret;
-
-	if (ringbuf == NULL) {
-		ringbuf = kzalloc(sizeof(*ringbuf), GFP_KERNEL);
-		if (!ringbuf)
-			return -ENOMEM;
-		ring->buffer = ringbuf;
-	}
-
-	ring->name = "render ring";
-	ring->id = RCS;
-	ring->mmio_base = RENDER_RING_BASE;
-
-	if (INTEL_INFO(dev)->gen >= 6) {
-		/* non-kms not supported on gen6+ */
-		ret = -ENODEV;
-		goto err_ringbuf;
-	}
-
-	/* Note: gem is not supported on gen5/ilk without kms (the corresponding
-	 * gem_init ioctl returns with -ENODEV). Hence we do not need to set up
-	 * the special gen5 functions. */
-	ring->add_request = i9xx_add_request;
-	if (INTEL_INFO(dev)->gen < 4)
-		ring->flush = gen2_render_ring_flush;
-	else
-		ring->flush = gen4_render_ring_flush;
-	ring->get_seqno = ring_get_seqno;
-	ring->set_seqno = ring_set_seqno;
-	if (IS_GEN2(dev)) {
-		ring->irq_get = i8xx_ring_get_irq;
-		ring->irq_put = i8xx_ring_put_irq;
-	} else {
-		ring->irq_get = i9xx_ring_get_irq;
-		ring->irq_put = i9xx_ring_put_irq;
-	}
-	ring->irq_enable_mask = I915_USER_INTERRUPT;
-	ring->write_tail = ring_write_tail;
-	if (INTEL_INFO(dev)->gen >= 4)
-		ring->dispatch_execbuffer = i965_dispatch_execbuffer;
-	else if (IS_I830(dev) || IS_845G(dev))
-		ring->dispatch_execbuffer = i830_dispatch_execbuffer;
-	else
-		ring->dispatch_execbuffer = i915_dispatch_execbuffer;
-	ring->init = init_render_ring;
-	ring->cleanup = render_ring_cleanup;
-
-	ring->dev = dev;
-	INIT_LIST_HEAD(&ring->active_list);
-	INIT_LIST_HEAD(&ring->request_list);
-
-	ringbuf->size = size;
-	ringbuf->effective_size = ringbuf->size;
-	if (IS_I830(ring->dev) || IS_845G(ring->dev))
-		ringbuf->effective_size -= 2 * CACHELINE_BYTES;
-
-	ringbuf->virtual_start = ioremap_wc(start, size);
-	if (ringbuf->virtual_start == NULL) {
-		DRM_ERROR("can not ioremap virtual address for"
-			  " ring buffer\n");
-		ret = -ENOMEM;
-		goto err_ringbuf;
-	}
-
-	if (!I915_NEED_GFX_HWS(dev)) {
-		ret = init_phys_status_page(ring);
-		if (ret)
-			goto err_vstart;
-	}
-
-	return 0;
-
-err_vstart:
-	iounmap(ringbuf->virtual_start);
-err_ringbuf:
-	kfree(ringbuf);
-	ring->buffer = NULL;
-	return ret;
-}
-
 int intel_init_bsd_ring_buffer(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
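The deleted intel_render_ring_init_dri() was the UMS/DRI1 entry point: it ioremap()ed a caller-supplied physical range instead of a GEM object and refused to run on gen6+. With the DRIVER_MODESET checks above also removed, nothing could reach it any more; its removal accounts for most of the 148 deleted lines in the diffstat.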