author    Maarten Lankhorst <maarten.lankhorst@canonical.com>  2014-01-09 05:03:08 -0500
committer Maarten Lankhorst <maarten.lankhorst@canonical.com>  2014-09-01 04:16:43 -0400
commit    58b4d720c1620bbf09e42b4f218dcb2d0d8cdf3e (patch)
tree      87edd6d708d020f349702eb42049bef343540aec
parent    dd7cfd641228abb2669d8d047d5ec377b1835900 (diff)
drm/ttm: add interruptible parameter to ttm_eu_reserve_buffers
It seems some drivers really want this as a parameter,
like vmwgfx.
Signed-off-by: Maarten Lankhorst <maarten.lankhorst@canonical.com>
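
As a usage sketch (not part of this patch; the list setup and variable names below are hypothetical), a call site after this change looks like:

    #include <drm/ttm/ttm_execbuf_util.h>

    struct ww_acquire_ctx ticket;
    struct list_head validate_list;   /* hypothetical list of ttm_validate_buffer entries */
    int ret;

    /*
     * Pass intr = true to let the reservation wait be interrupted by a
     * signal; the call may then return -ERESTARTSYS. Passing false waits
     * uninterruptibly, as vmwgfx now does in __vmw_execbuf_release_pinned_bo().
     */
    ret = ttm_eu_reserve_buffers(&ticket, &validate_list, true);
    if (ret)
        return ret;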
-rw-r--r--  drivers/gpu/drm/qxl/qxl_release.c        |  2
-rw-r--r--  drivers/gpu/drm/radeon/radeon_object.c   |  2
-rw-r--r--  drivers/gpu/drm/radeon/radeon_vm.c       |  2
-rw-r--r--  drivers/gpu/drm/ttm/ttm_execbuf_util.c   | 22
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c  |  7
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_resource.c |  2
-rw-r--r--  include/drm/ttm/ttm_execbuf_util.h       |  9
7 files changed, 24 insertions, 22 deletions
diff --git a/drivers/gpu/drm/qxl/qxl_release.c b/drivers/gpu/drm/qxl/qxl_release.c
index 2e5e38fee9b2..656f9d3a946d 100644
--- a/drivers/gpu/drm/qxl/qxl_release.c
+++ b/drivers/gpu/drm/qxl/qxl_release.c
@@ -159,7 +159,7 @@ int qxl_release_reserve_list(struct qxl_release *release, bool no_intr)
 	if (list_is_singular(&release->bos))
 		return 0;
 
-	ret = ttm_eu_reserve_buffers(&release->ticket, &release->bos);
+	ret = ttm_eu_reserve_buffers(&release->ticket, &release->bos, !no_intr);
 	if (ret)
 		return ret;
 
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
index cbac963571c0..378fe9ea4d44 100644
--- a/drivers/gpu/drm/radeon/radeon_object.c
+++ b/drivers/gpu/drm/radeon/radeon_object.c
@@ -482,7 +482,7 @@ int radeon_bo_list_validate(struct radeon_device *rdev,
 	u64 bytes_moved = 0, initial_bytes_moved;
 	u64 bytes_moved_threshold = radeon_bo_get_threshold_for_moves(rdev);
 
-	r = ttm_eu_reserve_buffers(ticket, head);
+	r = ttm_eu_reserve_buffers(ticket, head, true);
 	if (unlikely(r != 0)) {
 		return r;
 	}
diff --git a/drivers/gpu/drm/radeon/radeon_vm.c b/drivers/gpu/drm/radeon/radeon_vm.c
index 4751c6728fe9..3d9a6a036f8a 100644
--- a/drivers/gpu/drm/radeon/radeon_vm.c
+++ b/drivers/gpu/drm/radeon/radeon_vm.c
@@ -399,7 +399,7 @@ static int radeon_vm_clear_bo(struct radeon_device *rdev,
 	INIT_LIST_HEAD(&head);
 	list_add(&tv.head, &head);
 
-	r = ttm_eu_reserve_buffers(&ticket, &head);
+	r = ttm_eu_reserve_buffers(&ticket, &head, true);
 	if (r)
 		return r;
 
diff --git a/drivers/gpu/drm/ttm/ttm_execbuf_util.c b/drivers/gpu/drm/ttm/ttm_execbuf_util.c
index 0fbbbbd67afc..87d7deefc806 100644
--- a/drivers/gpu/drm/ttm/ttm_execbuf_util.c
+++ b/drivers/gpu/drm/ttm/ttm_execbuf_util.c
@@ -112,7 +112,7 @@ EXPORT_SYMBOL(ttm_eu_backoff_reservation);
  */
 
 int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket,
-			   struct list_head *list)
+			   struct list_head *list, bool intr)
 {
 	struct ttm_bo_global *glob;
 	struct ttm_validate_buffer *entry;
@@ -140,7 +140,7 @@ retry:
 		if (entry->reserved)
 			continue;
 
-		ret = __ttm_bo_reserve(bo, true, (ticket == NULL), true,
+		ret = __ttm_bo_reserve(bo, intr, (ticket == NULL), true,
				       ticket);
 
 		if (ret == -EDEADLK) {
@@ -153,13 +153,17 @@ retry:
 			ttm_eu_backoff_reservation_locked(list);
 			spin_unlock(&glob->lru_lock);
 			ttm_eu_list_ref_sub(list);
-			ret = ww_mutex_lock_slow_interruptible(&bo->resv->lock,
-							       ticket);
-			if (unlikely(ret != 0)) {
-				if (ret == -EINTR)
-					ret = -ERESTARTSYS;
-				goto err_fini;
-			}
+
+			if (intr) {
+				ret = ww_mutex_lock_slow_interruptible(&bo->resv->lock,
+								       ticket);
+				if (unlikely(ret != 0)) {
+					if (ret == -EINTR)
+						ret = -ERESTARTSYS;
+					goto err_fini;
+				}
+			} else
+				ww_mutex_lock_slow(&bo->resv->lock, ticket);
 
 			entry->reserved = true;
 			if (unlikely(atomic_read(&bo->cpu_writers) > 0)) {
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
index 7bfdaa163a33..24f067bf438d 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
@@ -2496,7 +2496,7 @@ int vmw_execbuf_process(struct drm_file *file_priv,
 	if (unlikely(ret != 0))
 		goto out_err_nores;
 
-	ret = ttm_eu_reserve_buffers(&ticket, &sw_context->validate_nodes);
+	ret = ttm_eu_reserve_buffers(&ticket, &sw_context->validate_nodes, true);
 	if (unlikely(ret != 0))
 		goto out_err;
 
@@ -2684,10 +2684,7 @@ void __vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv,
 	query_val.bo = ttm_bo_reference(dev_priv->dummy_query_bo);
 	list_add_tail(&query_val.head, &validate_list);
 
-	do {
-		ret = ttm_eu_reserve_buffers(&ticket, &validate_list);
-	} while (ret == -ERESTARTSYS);
-
+	ret = ttm_eu_reserve_buffers(&ticket, &validate_list, false);
 	if (unlikely(ret != 0)) {
 		vmw_execbuf_unpin_panic(dev_priv);
 		goto out_no_reserve;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
index 1ee86bf82750..23169362bca8 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
@@ -1216,7 +1216,7 @@ vmw_resource_check_buffer(struct vmw_resource *res,
 	INIT_LIST_HEAD(&val_list);
 	val_buf->bo = ttm_bo_reference(&res->backup->base);
 	list_add_tail(&val_buf->head, &val_list);
-	ret = ttm_eu_reserve_buffers(NULL, &val_list);
+	ret = ttm_eu_reserve_buffers(NULL, &val_list, interruptible);
 	if (unlikely(ret != 0))
 		goto out_no_reserve;
 
diff --git a/include/drm/ttm/ttm_execbuf_util.h b/include/drm/ttm/ttm_execbuf_util.h
index 16db7d01a336..fd95fd569ca3 100644
--- a/include/drm/ttm/ttm_execbuf_util.h
+++ b/include/drm/ttm/ttm_execbuf_util.h
@@ -73,6 +73,7 @@ extern void ttm_eu_backoff_reservation(struct ww_acquire_ctx *ticket,
  * @ticket:  [out] ww_acquire_ctx filled in by call, or NULL if only
  *           non-blocking reserves should be tried.
  * @list:    thread private list of ttm_validate_buffer structs.
+ * @intr:    should the wait be interruptible
  *
  * Tries to reserve bos pointed to by the list entries for validation.
  * If the function returns 0, all buffers are marked as "unfenced",
@@ -84,9 +85,9 @@ extern void ttm_eu_backoff_reservation(struct ww_acquire_ctx *ticket,
  * CPU write reservations to be cleared, and for other threads to
  * unreserve their buffers.
  *
- * This function may return -ERESTART or -EAGAIN if the calling process
- * receives a signal while waiting. In that case, no buffers on the list
- * will be reserved upon return.
+ * If intr is set to true, this function may return -ERESTARTSYS if the
+ * calling process receives a signal while waiting. In that case, no
+ * buffers on the list will be reserved upon return.
  *
  * Buffers reserved by this function should be unreserved by
  * a call to either ttm_eu_backoff_reservation() or
@@ -95,7 +96,7 @@ extern void ttm_eu_backoff_reservation(struct ww_acquire_ctx *ticket,
  */
 
 extern int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket,
-				  struct list_head *list);
+				  struct list_head *list, bool intr);
 
 /**
  * function ttm_eu_fence_buffer_objects.