author    Thomas Daniel <thomas.daniel@intel.com>    2014-11-13 05:28:56 -0500
committer Daniel Vetter <daniel.vetter@ffwll.ch>     2014-11-19 13:56:44 -0500
commit    7ba717cf365d79f2b284e508205ec3d4a05fc41b (patch)
tree      99d5b23a67d1a5381b50356d9c1135687e0e997f
parent    dcb4c12a687710ab745c2cdee8298c3e97f6f707 (diff)
drm/i915/bdw: Pin the ringbuffer backing object to GGTT on-demand
Same as with the context, pinning to GGTT regardless is harmful (it badly
fragments the GGTT and can even exhaust it).

Unfortunately, this case is also more complex than the previous one because
we need to map and access the ringbuffer in several places along the
execbuffer path (and we cannot make do by leaving the default ringbuffer
pinned, as before). Also, the context object itself contains a pointer to
the ringbuffer address that we have to keep updated if we are going to
allow the ringbuffer to move around.

v2: Same as with the context pinning, we cannot really do it during an
interrupt. Also, pin the default ringbuffer objects regardless (makes
error capture a lot easier).

v3: Rebased. Take a pin reference of the ringbuffer for each item in the
execlist request queue because the hardware may still be using the
ringbuffer after the MI_USER_INTERRUPT to notify the seqno update is
executed. The ringbuffer must remain pinned until the context save is
complete. No longer pin and unpin ringbuffer in populate_lr_context() -
this transient address is meaningless and the pinning can cause a sleep
while atomic.

v4: Moved ringbuffer pin and unpin into the lr_context_pin functions.
Downgraded pinning check BUG_ONs to WARN_ONs.

v5: Reinstated WARN_ONs for unexpected execlist states. Removed unused
variable.

Issue: VIZ-4277
Signed-off-by: Oscar Mateo <oscar.mateo@intel.com>
Signed-off-by: Thomas Daniel <thomas.daniel@intel.com>
Reviewed-by: Akash Goel <akash.goels@gmail.com>
Reviewed-by: Deepak S <deepak.s@linux.intel.com>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
-rw-r--r--  drivers/gpu/drm/i915/intel_lrc.c        | 102
-rw-r--r--  drivers/gpu/drm/i915/intel_ringbuffer.c |  85
-rw-r--r--  drivers/gpu/drm/i915/intel_ringbuffer.h |   3
3 files changed, 128 insertions, 62 deletions
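The lifecycle implemented by the diff below is: a pin reference on the context (and now also on its ringbuffer object) is taken for every execlist request queued, and dropped only when that request retires, so the ringbuffer stays resident in the GGTT until the context save has completed. The following is a minimal, standalone illustrative sketch of that reference-counting scheme only; the types and function names here are simplified stand-ins, not the driver code (the real entry points are intel_lr_context_pin()/intel_lr_context_unpin() and intel_pin_and_map_ringbuffer_obj()/intel_unpin_ringbuffer_obj()).

/* Illustrative sketch only - simplified stand-in types, not i915 code. */
#include <assert.h>
#include <stdbool.h>

struct engine_ctx {
	int unpin_count;   /* pin references held on this context */
	bool ctx_pinned;   /* context image resident in GGTT */
	bool ring_pinned;  /* ringbuffer object pinned and mapped */
};

/* Taken when a request is queued (cf. execlists_context_queue). */
static void ctx_pin(struct engine_ctx *e)
{
	if (e->unpin_count++ == 0) {
		e->ctx_pinned = true;   /* pin context image to GGTT */
		e->ring_pinned = true;  /* pin + map ringbuffer object */
	}
}

/* Dropped when the request retires (cf. intel_execlists_retire_requests),
 * i.e. only after the context save has completed. */
static void ctx_unpin(struct engine_ctx *e)
{
	assert(e->unpin_count > 0);
	if (--e->unpin_count == 0) {
		e->ring_pinned = false; /* unmap + unpin ringbuffer */
		e->ctx_pinned = false;  /* unpin context image */
	}
}

int main(void)
{
	struct engine_ctx e = {0};

	ctx_pin(&e);            /* first request queued */
	ctx_pin(&e);            /* second request on the same context */
	ctx_unpin(&e);          /* first request retired */
	assert(e.ring_pinned);  /* still in flight: ring must stay pinned */
	ctx_unpin(&e);          /* last request retired */
	assert(!e.ring_pinned && !e.ctx_pinned);
	return 0;
}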
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index 9560e634c9b8..e588376227ea 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -203,6 +203,9 @@ enum {
 };
 #define GEN8_CTX_ID_SHIFT 32
 
+static int intel_lr_context_pin(struct intel_engine_cs *ring,
+		struct intel_context *ctx);
+
 /**
  * intel_sanitize_enable_execlists() - sanitize i915.enable_execlists
  * @dev: DRM device.
@@ -354,7 +357,9 @@ static void execlists_elsp_write(struct intel_engine_cs *ring,
 	spin_unlock_irqrestore(&dev_priv->uncore.lock, flags);
 }
 
-static int execlists_ctx_write_tail(struct drm_i915_gem_object *ctx_obj, u32 tail)
+static int execlists_update_context(struct drm_i915_gem_object *ctx_obj,
+				    struct drm_i915_gem_object *ring_obj,
+				    u32 tail)
 {
 	struct page *page;
 	uint32_t *reg_state;
@@ -363,6 +368,7 @@ static int execlists_ctx_write_tail(struct drm_i915_gem_object *ctx_obj, u32 tail)
 	reg_state = kmap_atomic(page);
 
 	reg_state[CTX_RING_TAIL+1] = tail;
+	reg_state[CTX_RING_BUFFER_START+1] = i915_gem_obj_ggtt_offset(ring_obj);
 
 	kunmap_atomic(reg_state);
 
@@ -373,21 +379,25 @@ static void execlists_submit_contexts(struct intel_engine_cs *ring,
 				      struct intel_context *to0, u32 tail0,
 				      struct intel_context *to1, u32 tail1)
 {
-	struct drm_i915_gem_object *ctx_obj0;
+	struct drm_i915_gem_object *ctx_obj0 = to0->engine[ring->id].state;
+	struct intel_ringbuffer *ringbuf0 = to0->engine[ring->id].ringbuf;
 	struct drm_i915_gem_object *ctx_obj1 = NULL;
+	struct intel_ringbuffer *ringbuf1 = NULL;
 
-	ctx_obj0 = to0->engine[ring->id].state;
 	BUG_ON(!ctx_obj0);
 	WARN_ON(!i915_gem_obj_is_pinned(ctx_obj0));
+	WARN_ON(!i915_gem_obj_is_pinned(ringbuf0->obj));
 
-	execlists_ctx_write_tail(ctx_obj0, tail0);
+	execlists_update_context(ctx_obj0, ringbuf0->obj, tail0);
 
 	if (to1) {
+		ringbuf1 = to1->engine[ring->id].ringbuf;
 		ctx_obj1 = to1->engine[ring->id].state;
 		BUG_ON(!ctx_obj1);
 		WARN_ON(!i915_gem_obj_is_pinned(ctx_obj1));
+		WARN_ON(!i915_gem_obj_is_pinned(ringbuf1->obj));
 
-		execlists_ctx_write_tail(ctx_obj1, tail1);
+		execlists_update_context(ctx_obj1, ringbuf1->obj, tail1);
 	}
 
 	execlists_elsp_write(ring, ctx_obj0, ctx_obj1);
@@ -537,6 +547,10 @@ static int execlists_context_queue(struct intel_engine_cs *ring,
 		return -ENOMEM;
 	req->ctx = to;
 	i915_gem_context_reference(req->ctx);
+
+	if (to != ring->default_context)
+		intel_lr_context_pin(ring, to);
+
 	req->ring = ring;
 	req->tail = tail;
 
@@ -557,7 +571,7 @@ static int execlists_context_queue(struct intel_engine_cs *ring,
 
 	if (to == tail_req->ctx) {
 		WARN(tail_req->elsp_submitted != 0,
-			"More than 2 already-submitted reqs queued\n");
+				"More than 2 already-submitted reqs queued\n");
 		list_del(&tail_req->execlist_link);
 		list_add_tail(&tail_req->execlist_link,
 			&ring->execlist_retired_req_list);
@@ -745,6 +759,12 @@ void intel_execlists_retire_requests(struct intel_engine_cs *ring)
 	spin_unlock_irqrestore(&ring->execlist_lock, flags);
 
 	list_for_each_entry_safe(req, tmp, &retired_list, execlist_link) {
+		struct intel_context *ctx = req->ctx;
+		struct drm_i915_gem_object *ctx_obj =
+				ctx->engine[ring->id].state;
+
+		if (ctx_obj && (ctx != ring->default_context))
+			intel_lr_context_unpin(ring, ctx);
 		intel_runtime_pm_put(dev_priv);
 		i915_gem_context_unreference(req->ctx);
 		list_del(&req->execlist_link);
@@ -816,6 +836,7 @@ static int intel_lr_context_pin(struct intel_engine_cs *ring,
 		struct intel_context *ctx)
 {
 	struct drm_i915_gem_object *ctx_obj = ctx->engine[ring->id].state;
+	struct intel_ringbuffer *ringbuf = ctx->engine[ring->id].ringbuf;
 	int ret = 0;
 
 	WARN_ON(!mutex_is_locked(&ring->dev->struct_mutex));
@@ -823,21 +844,35 @@ static int intel_lr_context_pin(struct intel_engine_cs *ring,
 		ret = i915_gem_obj_ggtt_pin(ctx_obj,
 				GEN8_LR_CONTEXT_ALIGN, 0);
 		if (ret)
-			ctx->engine[ring->id].unpin_count = 0;
+			goto reset_unpin_count;
+
+		ret = intel_pin_and_map_ringbuffer_obj(ring->dev, ringbuf);
+		if (ret)
+			goto unpin_ctx_obj;
 	}
 
 	return ret;
+
+unpin_ctx_obj:
+	i915_gem_object_ggtt_unpin(ctx_obj);
+reset_unpin_count:
+	ctx->engine[ring->id].unpin_count = 0;
+
+	return ret;
 }
 
 void intel_lr_context_unpin(struct intel_engine_cs *ring,
 		struct intel_context *ctx)
 {
 	struct drm_i915_gem_object *ctx_obj = ctx->engine[ring->id].state;
+	struct intel_ringbuffer *ringbuf = ctx->engine[ring->id].ringbuf;
 
 	if (ctx_obj) {
 		WARN_ON(!mutex_is_locked(&ring->dev->struct_mutex));
-		if (--ctx->engine[ring->id].unpin_count == 0)
+		if (--ctx->engine[ring->id].unpin_count == 0) {
+			intel_unpin_ringbuffer_obj(ringbuf);
 			i915_gem_object_ggtt_unpin(ctx_obj);
+		}
 	}
 }
 
@@ -1595,7 +1630,6 @@ populate_lr_context(struct intel_context *ctx, struct drm_i915_gem_object *ctx_obj,
 {
 	struct drm_device *dev = ring->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct drm_i915_gem_object *ring_obj = ringbuf->obj;
 	struct i915_hw_ppgtt *ppgtt = ctx->ppgtt;
 	struct page *page;
 	uint32_t *reg_state;
@@ -1641,7 +1675,9 @@ populate_lr_context(struct intel_context *ctx, struct drm_i915_gem_object *ctx_obj,
 	reg_state[CTX_RING_TAIL] = RING_TAIL(ring->mmio_base);
 	reg_state[CTX_RING_TAIL+1] = 0;
 	reg_state[CTX_RING_BUFFER_START] = RING_START(ring->mmio_base);
-	reg_state[CTX_RING_BUFFER_START+1] = i915_gem_obj_ggtt_offset(ring_obj);
+	/* Ring buffer start address is not known until the buffer is pinned.
+	 * It is written to the context image in execlists_update_context()
+	 */
 	reg_state[CTX_RING_BUFFER_CONTROL] = RING_CTL(ring->mmio_base);
 	reg_state[CTX_RING_BUFFER_CONTROL+1] =
 			((ringbuf->size - PAGE_SIZE) & RING_NR_PAGES) | RING_VALID;
@@ -1723,10 +1759,12 @@ void intel_lr_context_free(struct intel_context *ctx)
 					ctx->engine[i].ringbuf;
 			struct intel_engine_cs *ring = ringbuf->ring;
 
+			if (ctx == ring->default_context) {
+				intel_unpin_ringbuffer_obj(ringbuf);
+				i915_gem_object_ggtt_unpin(ctx_obj);
+			}
 			intel_destroy_ringbuffer_obj(ringbuf);
 			kfree(ringbuf);
-			if (ctx == ring->default_context)
-				i915_gem_object_ggtt_unpin(ctx_obj);
 			drm_gem_object_unreference(&ctx_obj->base);
 		}
 	}
@@ -1823,11 +1861,8 @@ int intel_lr_context_deferred_create(struct intel_context *ctx,
 	if (!ringbuf) {
 		DRM_DEBUG_DRIVER("Failed to allocate ringbuffer %s\n",
 				ring->name);
-		if (is_global_default_ctx)
-			i915_gem_object_ggtt_unpin(ctx_obj);
-		drm_gem_object_unreference(&ctx_obj->base);
 		ret = -ENOMEM;
-		return ret;
+		goto error_unpin_ctx;
 	}
 
 	ringbuf->ring = ring;
@@ -1840,22 +1875,30 @@ int intel_lr_context_deferred_create(struct intel_context *ctx,
 	ringbuf->space = ringbuf->size;
 	ringbuf->last_retired_head = -1;
 
-	/* TODO: For now we put this in the mappable region so that we can reuse
-	 * the existing ringbuffer code which ioremaps it. When we start
-	 * creating many contexts, this will no longer work and we must switch
-	 * to a kmapish interface.
-	 */
-	ret = intel_alloc_ringbuffer_obj(dev, ringbuf);
-	if (ret) {
-		DRM_DEBUG_DRIVER("Failed to allocate ringbuffer obj %s: %d\n",
+	if (ringbuf->obj == NULL) {
+		ret = intel_alloc_ringbuffer_obj(dev, ringbuf);
+		if (ret) {
+			DRM_DEBUG_DRIVER(
+			"Failed to allocate ringbuffer obj %s: %d\n",
 				ring->name, ret);
-		goto error;
+			goto error_free_rbuf;
+		}
+
+		if (is_global_default_ctx) {
+			ret = intel_pin_and_map_ringbuffer_obj(dev, ringbuf);
+			if (ret) {
+				DRM_ERROR(
+				"Failed to pin and map ringbuffer %s: %d\n",
+						ring->name, ret);
+				goto error_destroy_rbuf;
+			}
+		}
+
 	}
 
 	ret = populate_lr_context(ctx, ctx_obj, ring, ringbuf);
 	if (ret) {
 		DRM_DEBUG_DRIVER("Failed to populate LRC: %d\n", ret);
-		intel_destroy_ringbuffer_obj(ringbuf);
 		goto error;
 	}
 
@@ -1877,7 +1920,6 @@ int intel_lr_context_deferred_create(struct intel_context *ctx,
 			DRM_ERROR("Init render state failed: %d\n", ret);
 			ctx->engine[ring->id].ringbuf = NULL;
 			ctx->engine[ring->id].state = NULL;
-			intel_destroy_ringbuffer_obj(ringbuf);
 			goto error;
 		}
 		ctx->rcs_initialized = true;
@@ -1886,7 +1928,13 @@ int intel_lr_context_deferred_create(struct intel_context *ctx,
 	return 0;
 
 error:
+	if (is_global_default_ctx)
+		intel_unpin_ringbuffer_obj(ringbuf);
+error_destroy_rbuf:
+	intel_destroy_ringbuffer_obj(ringbuf);
+error_free_rbuf:
 	kfree(ringbuf);
+error_unpin_ctx:
 	if (is_global_default_ctx)
 		i915_gem_object_ggtt_unpin(ctx_obj);
 	drm_gem_object_unreference(&ctx_obj->base);
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index ae092589ea0c..0a4f35e735c3 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -1722,13 +1722,42 @@ static int init_phys_status_page(struct intel_engine_cs *ring)
 	return 0;
 }
 
-void intel_destroy_ringbuffer_obj(struct intel_ringbuffer *ringbuf)
+void intel_unpin_ringbuffer_obj(struct intel_ringbuffer *ringbuf)
 {
-	if (!ringbuf->obj)
-		return;
-
 	iounmap(ringbuf->virtual_start);
+	ringbuf->virtual_start = NULL;
 	i915_gem_object_ggtt_unpin(ringbuf->obj);
+}
+
+int intel_pin_and_map_ringbuffer_obj(struct drm_device *dev,
+				     struct intel_ringbuffer *ringbuf)
+{
+	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_gem_object *obj = ringbuf->obj;
+	int ret;
+
+	ret = i915_gem_obj_ggtt_pin(obj, PAGE_SIZE, PIN_MAPPABLE);
+	if (ret)
+		return ret;
+
+	ret = i915_gem_object_set_to_gtt_domain(obj, true);
+	if (ret) {
+		i915_gem_object_ggtt_unpin(obj);
+		return ret;
+	}
+
+	ringbuf->virtual_start = ioremap_wc(dev_priv->gtt.mappable_base +
+			i915_gem_obj_ggtt_offset(obj), ringbuf->size);
+	if (ringbuf->virtual_start == NULL) {
+		i915_gem_object_ggtt_unpin(obj);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+void intel_destroy_ringbuffer_obj(struct intel_ringbuffer *ringbuf)
+{
 	drm_gem_object_unreference(&ringbuf->obj->base);
 	ringbuf->obj = NULL;
 }
@@ -1736,12 +1765,7 @@ void intel_destroy_ringbuffer_obj(struct intel_ringbuffer *ringbuf)
 int intel_alloc_ringbuffer_obj(struct drm_device *dev,
 			       struct intel_ringbuffer *ringbuf)
 {
-	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct drm_i915_gem_object *obj;
-	int ret;
-
-	if (ringbuf->obj)
-		return 0;
 
 	obj = NULL;
 	if (!HAS_LLC(dev))
@@ -1754,30 +1778,9 @@ int intel_alloc_ringbuffer_obj(struct drm_device *dev,
 	/* mark ring buffers as read-only from GPU side by default */
 	obj->gt_ro = 1;
 
-	ret = i915_gem_obj_ggtt_pin(obj, PAGE_SIZE, PIN_MAPPABLE);
-	if (ret)
-		goto err_unref;
-
-	ret = i915_gem_object_set_to_gtt_domain(obj, true);
-	if (ret)
-		goto err_unpin;
-
-	ringbuf->virtual_start =
-		ioremap_wc(dev_priv->gtt.mappable_base + i915_gem_obj_ggtt_offset(obj),
-				ringbuf->size);
-	if (ringbuf->virtual_start == NULL) {
-		ret = -EINVAL;
-		goto err_unpin;
-	}
-
 	ringbuf->obj = obj;
-	return 0;
 
-err_unpin:
-	i915_gem_object_ggtt_unpin(obj);
-err_unref:
-	drm_gem_object_unreference(&obj->base);
-	return ret;
+	return 0;
 }
 
 static int intel_init_ring_buffer(struct drm_device *dev,
@@ -1814,10 +1817,21 @@ static int intel_init_ring_buffer(struct drm_device *dev,
 		goto error;
 	}
 
-	ret = intel_alloc_ringbuffer_obj(dev, ringbuf);
-	if (ret) {
-		DRM_ERROR("Failed to allocate ringbuffer %s: %d\n", ring->name, ret);
-		goto error;
+	if (ringbuf->obj == NULL) {
+		ret = intel_alloc_ringbuffer_obj(dev, ringbuf);
+		if (ret) {
+			DRM_ERROR("Failed to allocate ringbuffer %s: %d\n",
+					ring->name, ret);
+			goto error;
+		}
+
+		ret = intel_pin_and_map_ringbuffer_obj(dev, ringbuf);
+		if (ret) {
+			DRM_ERROR("Failed to pin and map ringbuffer %s: %d\n",
+					ring->name, ret);
+			intel_destroy_ringbuffer_obj(ringbuf);
+			goto error;
+		}
 	}
 
 	/* Workaround an erratum on the i830 which causes a hang if
@@ -1858,6 +1872,7 @@ void intel_cleanup_ring_buffer(struct intel_engine_cs *ring)
 	intel_stop_ring_buffer(ring);
 	WARN_ON(!IS_GEN2(ring->dev) && (I915_READ_MODE(ring) & MODE_IDLE) == 0);
 
+	intel_unpin_ringbuffer_obj(ringbuf);
 	intel_destroy_ringbuffer_obj(ringbuf);
 	ring->preallocated_lazy_request = NULL;
 	ring->outstanding_lazy_seqno = 0;
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index 85156567044b..9eb3188595a6 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -383,6 +383,9 @@ intel_write_status_page(struct intel_engine_cs *ring,
 #define I915_GEM_HWS_SCRATCH_INDEX	0x30
 #define I915_GEM_HWS_SCRATCH_ADDR (I915_GEM_HWS_SCRATCH_INDEX << MI_STORE_DWORD_INDEX_SHIFT)
 
+void intel_unpin_ringbuffer_obj(struct intel_ringbuffer *ringbuf);
+int intel_pin_and_map_ringbuffer_obj(struct drm_device *dev,
+				     struct intel_ringbuffer *ringbuf);
 void intel_destroy_ringbuffer_obj(struct intel_ringbuffer *ringbuf);
 int intel_alloc_ringbuffer_obj(struct drm_device *dev,
 			       struct intel_ringbuffer *ringbuf);