Diffstat (limited to 'drivers/gpu/drm/i915/i915_gem_execbuffer.c')
-rw-r--r--	drivers/gpu/drm/i915/i915_gem_execbuffer.c | 95
1 file changed, 25 insertions(+), 70 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 60dc2a865f5f..3f0c612d42e7 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -66,6 +66,15 @@ enum {
 #define __I915_EXEC_ILLEGAL_FLAGS \
 	(__I915_EXEC_UNKNOWN_FLAGS | I915_EXEC_CONSTANTS_MASK)
 
+/* Catch emission of unexpected errors for CI! */
+#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)
+#undef EINVAL
+#define EINVAL ({ \
+	DRM_DEBUG_DRIVER("EINVAL at %s:%d\n", __func__, __LINE__); \
+	22; \
+})
+#endif
+
 /**
  * DOC: User command execution
  *
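The block added above only takes effect in CONFIG_DRM_I915_DEBUG_GEM builds: it redefines EINVAL as a GNU C statement expression that logs the function and line producing the error and then evaluates to 22 (the numeric value of EINVAL on Linux), so any unexpected -EINVAL returned from this file shows up with its call site in the CI logs. A minimal user-space sketch of the same idiom, with fprintf() standing in for DRM_DEBUG_DRIVER() (the names and output here are illustrative only, not the driver's):

#include <errno.h>
#include <stdio.h>

/* Redefine EINVAL as a statement expression (GCC/clang extension) that
 * reports the call site and still evaluates to the error number. */
#undef EINVAL
#define EINVAL ({ \
	fprintf(stderr, "EINVAL at %s:%d\n", __func__, __LINE__); \
	22; \
})

static int validate(int flags)
{
	if (flags < 0)
		return -EINVAL;	/* prints "EINVAL at validate:<line>" and returns -22 */
	return 0;
}

int main(void)
{
	return validate(-1) == -22 ? 0 : 1;
}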
@@ -534,7 +543,8 @@ eb_add_vma(struct i915_execbuffer *eb,
 	 * paranoia do it everywhere.
 	 */
	if (i == batch_idx) {
-		if (!(eb->flags[i] & EXEC_OBJECT_PINNED))
+		if (entry->relocation_count &&
+		    !(eb->flags[i] & EXEC_OBJECT_PINNED))
 			eb->flags[i] |= __EXEC_OBJECT_NEEDS_BIAS;
 		if (eb->reloc_cache.has_fence)
 			eb->flags[i] |= EXEC_OBJECT_NEEDS_FENCE;
@@ -1155,18 +1165,16 @@ static int __reloc_gpu_alloc(struct i915_execbuffer *eb,
 		goto err_request;
 
 	GEM_BUG_ON(!reservation_object_test_signaled_rcu(batch->resv, true));
-	i915_vma_move_to_active(batch, rq, 0);
-	reservation_object_lock(batch->resv, NULL);
-	reservation_object_add_excl_fence(batch->resv, &rq->fence);
-	reservation_object_unlock(batch->resv);
-	i915_vma_unpin(batch);
+	err = i915_vma_move_to_active(batch, rq, 0);
+	if (err)
+		goto skip_request;
 
-	i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
-	reservation_object_lock(vma->resv, NULL);
-	reservation_object_add_excl_fence(vma->resv, &rq->fence);
-	reservation_object_unlock(vma->resv);
+	err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
+	if (err)
+		goto skip_request;
 
 	rq->batch = batch;
+	i915_vma_unpin(batch);
 
 	cache->rq = rq;
 	cache->rq_cmd = cmd;
@@ -1175,6 +1183,8 @@ static int __reloc_gpu_alloc(struct i915_execbuffer *eb,
 	/* Return with batch mapping (cmd) still pinned */
 	return 0;
 
+skip_request:
+	i915_request_skip(rq, err);
 err_request:
 	i915_request_add(rq);
 err_unpin:
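The new skip_request label sits just above err_request, so a failure after the request has been allocated hands the error to i915_request_skip() and then falls through into the existing err_request path: the request is still added via i915_request_add() and retired normally rather than leaked. A plain C sketch of that ordered-label, fall-through cleanup idiom (purely illustrative, not driver code):

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

/* Error labels appear in reverse order of setup: a failure jumps to the
 * label matching how much has been set up and falls through the rest. */
static int submit(int fail_late)
{
	char *request, *payload;
	int err;

	request = malloc(32);
	if (!request)
		return -ENOMEM;

	payload = malloc(32);
	if (!payload) {
		err = -ENOMEM;
		goto err_request;
	}

	if (fail_late) {
		err = -EINVAL;
		goto err_payload;	/* later failure: more to unwind */
	}

	printf("submitted\n");
	err = 0;

err_payload:
	free(payload);
err_request:
	free(request);
	return err;
}

int main(void)
{
	return submit(1) == -EINVAL ? 0 : 1;
}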
@@ -1761,25 +1771,6 @@ slow:
 	return eb_relocate_slow(eb);
 }
 
-static void eb_export_fence(struct i915_vma *vma,
-			    struct i915_request *rq,
-			    unsigned int flags)
-{
-	struct reservation_object *resv = vma->resv;
-
-	/*
-	 * Ignore errors from failing to allocate the new fence, we can't
-	 * handle an error right now. Worst case should be missed
-	 * synchronisation leading to rendering corruption.
-	 */
-	reservation_object_lock(resv, NULL);
-	if (flags & EXEC_OBJECT_WRITE)
-		reservation_object_add_excl_fence(resv, &rq->fence);
-	else if (reservation_object_reserve_shared(resv) == 0)
-		reservation_object_add_shared_fence(resv, &rq->fence);
-	reservation_object_unlock(resv);
-}
-
 static int eb_move_to_gpu(struct i915_execbuffer *eb)
 {
 	const unsigned int count = eb->buffer_count;
@@ -1833,8 +1824,11 @@ static int eb_move_to_gpu(struct i915_execbuffer *eb)
 		unsigned int flags = eb->flags[i];
 		struct i915_vma *vma = eb->vma[i];
 
-		i915_vma_move_to_active(vma, eb->request, flags);
-		eb_export_fence(vma, eb->request, flags);
+		err = i915_vma_move_to_active(vma, eb->request, flags);
+		if (unlikely(err)) {
+			i915_request_skip(eb->request, err);
+			return err;
+		}
 
 		__eb_unreserve_vma(vma, flags);
 		vma->exec_flags = NULL;
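eb_move_to_gpu() now propagates the error and hands the in-flight request to i915_request_skip() instead of calling the removed eb_export_fence(). The unlikely() wrapper around the check is the kernel's branch-prediction hint from include/linux/compiler.h, essentially a macro over the compiler builtin, as in this stand-alone sketch:

#include <stdio.h>

/* Hint that the condition is almost always false so the compiler can lay
 * the error path out of line (mirrors the kernel's definition). */
#define unlikely(x)	__builtin_expect(!!(x), 0)

int main(void)
{
	int err = -22;

	if (unlikely(err))	/* same shape as the check added above */
		fprintf(stderr, "skipping request: %d\n", err);
	return 0;
}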
@@ -1874,45 +1868,6 @@ static bool i915_gem_check_execbuffer(struct drm_i915_gem_execbuffer2 *exec)
 	return true;
 }
 
-void i915_vma_move_to_active(struct i915_vma *vma,
-			     struct i915_request *rq,
-			     unsigned int flags)
-{
-	struct drm_i915_gem_object *obj = vma->obj;
-	const unsigned int idx = rq->engine->id;
-
-	lockdep_assert_held(&rq->i915->drm.struct_mutex);
-	GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
-
-	/*
-	 * Add a reference if we're newly entering the active list.
-	 * The order in which we add operations to the retirement queue is
-	 * vital here: mark_active adds to the start of the callback list,
-	 * such that subsequent callbacks are called first. Therefore we
-	 * add the active reference first and queue for it to be dropped
-	 * *last*.
-	 */
-	if (!i915_vma_is_active(vma))
-		obj->active_count++;
-	i915_vma_set_active(vma, idx);
-	i915_gem_active_set(&vma->last_read[idx], rq);
-	list_move_tail(&vma->vm_link, &vma->vm->active_list);
-
-	obj->write_domain = 0;
-	if (flags & EXEC_OBJECT_WRITE) {
-		obj->write_domain = I915_GEM_DOMAIN_RENDER;
-
-		if (intel_fb_obj_invalidate(obj, ORIGIN_CS))
-			i915_gem_active_set(&obj->frontbuffer_write, rq);
-
-		obj->read_domains = 0;
-	}
-	obj->read_domains |= I915_GEM_GPU_DOMAINS;
-
-	if (flags & EXEC_OBJECT_NEEDS_FENCE)
-		i915_gem_active_set(&vma->last_fence, rq);
-}
-
 static int i915_reset_gen7_sol_offsets(struct i915_request *rq)
 {
 	u32 *cs;
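Both eb_export_fence() and the execbuffer copy of i915_vma_move_to_active() disappear from this file, while every caller above now treats i915_vma_move_to_active() as returning an error. The consolidated function presumably lives elsewhere (it was moved into i915_vma.c around this time) and folds the fence export into the activity tracking, so a single call publishes the request to the vma's reservation object and can report failure. A rough sketch assembled from the two bodies removed above, to illustrate the shape only; this is not the actual replacement code, and the choice to propagate the reserve_shared() failure is this sketch's, not necessarily the driver's:

int i915_vma_move_to_active(struct i915_vma *vma,
			    struct i915_request *rq,
			    unsigned int flags)
{
	struct drm_i915_gem_object *obj = vma->obj;
	const unsigned int idx = rq->engine->id;
	struct reservation_object *resv = vma->resv;

	lockdep_assert_held(&rq->i915->drm.struct_mutex);
	GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));

	/* Same bookkeeping as the removed copy: take the active reference
	 * first so it is dropped last when the request is retired. */
	if (!i915_vma_is_active(vma))
		obj->active_count++;
	i915_vma_set_active(vma, idx);
	i915_gem_active_set(&vma->last_read[idx], rq);
	list_move_tail(&vma->vm_link, &vma->vm->active_list);

	obj->write_domain = 0;
	if (flags & EXEC_OBJECT_WRITE) {
		obj->write_domain = I915_GEM_DOMAIN_RENDER;
		if (intel_fb_obj_invalidate(obj, ORIGIN_CS))
			i915_gem_active_set(&obj->frontbuffer_write, rq);
		obj->read_domains = 0;
	}
	obj->read_domains |= I915_GEM_GPU_DOMAINS;

	if (flags & EXEC_OBJECT_NEEDS_FENCE)
		i915_gem_active_set(&vma->last_fence, rq);

	/* What eb_export_fence() used to do; as one example of an error the
	 * int return could carry, this sketch propagates the allocation
	 * failure of reserve_shared() instead of silently ignoring it. */
	reservation_object_lock(resv, NULL);
	if (flags & EXEC_OBJECT_WRITE) {
		reservation_object_add_excl_fence(resv, &rq->fence);
	} else {
		int err = reservation_object_reserve_shared(resv);

		if (err) {
			reservation_object_unlock(resv);
			return err;
		}
		reservation_object_add_shared_fence(resv, &rq->fence);
	}
	reservation_object_unlock(resv);

	return 0;
}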