author    Christian König <christian.koenig@amd.com>    2014-09-04 14:01:52 -0400
committer Alex Deucher <alexander.deucher@amd.com>      2014-09-11 10:46:00 -0400
commit    ae9c0af2c0ea92e57013ab2dd7271ba7d6b2a833
tree      b524a3c3960582346b60418d068d96317e79696b
parent    c4d922b14544d115232b7448a2ea7640ba901eb6
drm/ttm: allow fence to be added as shared
This patch adds a new flag to the ttm_validate_buffer list entries so that
the fence can be added as a shared fence to the buffer's reservation object.
Signed-off-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
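
To illustrate how a driver would use the new flag, here is a minimal, hypothetical sketch
(the helper name example_fence_shared and the incoming bo/fence arguments are made up for
illustration; only the tv.shared field and the ttm_eu_* / reservation_object_* behaviour
come from this patch, and the struct fence type reflects the fence API of this kernel series):

    #include <linux/list.h>
    #include <drm/ttm/ttm_bo_api.h>
    #include <drm/ttm/ttm_execbuf_util.h>

    /* Hypothetical driver-side helper: reserve a single BO and attach the
     * fence produced by a command submission as a *shared* fence.
     */
    static int example_fence_shared(struct ttm_buffer_object *bo,
                                    struct fence *fence)
    {
            struct ttm_validate_buffer tv;
            struct ww_acquire_ctx ticket;
            struct list_head head;
            int ret;

            memset(&tv, 0, sizeof(tv));
            tv.bo = bo;
            tv.shared = true;       /* new flag: ask for a shared fence slot */

            INIT_LIST_HEAD(&head);
            list_add(&tv.head, &head);

            /* With tv.shared set, ttm_eu_reserve_buffers() also calls
             * reservation_object_reserve_shared() on the BO's resv object.
             */
            ret = ttm_eu_reserve_buffers(&ticket, &head, true);
            if (ret)
                    return ret;

            /* ... the driver would submit work here; in real code "fence"
             * would be produced by that submission ... */

            /* Adds the fence as shared instead of exclusive, then unreserves. */
            ttm_eu_fence_buffer_objects(&ticket, &head, fence);
            return 0;
    }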
 drivers/gpu/drm/qxl/qxl_release.c        |  1
 drivers/gpu/drm/radeon/radeon_cs.c       |  1
 drivers/gpu/drm/radeon/radeon_vm.c       |  3
 drivers/gpu/drm/ttm/ttm_execbuf_util.c   | 18
 drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c  |  3
 drivers/gpu/drm/vmwgfx/vmwgfx_resource.c |  5
 include/drm/ttm/ttm_execbuf_util.h       |  2
 7 files changed, 30 insertions(+), 3 deletions(-)
diff --git a/drivers/gpu/drm/qxl/qxl_release.c b/drivers/gpu/drm/qxl/qxl_release.c
index a6e19c83143e..446e71ca36cb 100644
--- a/drivers/gpu/drm/qxl/qxl_release.c
+++ b/drivers/gpu/drm/qxl/qxl_release.c
@@ -226,6 +226,7 @@ int qxl_release_list_add(struct qxl_release *release, struct qxl_bo *bo)
 
 	qxl_bo_ref(bo);
 	entry->tv.bo = &bo->tbo;
+	entry->tv.shared = false;
 	list_add_tail(&entry->tv.head, &release->bos);
 	return 0;
 }
diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c
index 6e3d1c8f3483..cd517ab93608 100644
--- a/drivers/gpu/drm/radeon/radeon_cs.c
+++ b/drivers/gpu/drm/radeon/radeon_cs.c
@@ -183,6 +183,7 @@ static int radeon_cs_parser_relocs(struct radeon_cs_parser *p)
 		}
 
 		p->relocs[i].tv.bo = &p->relocs[i].robj->tbo;
+		p->relocs[i].tv.shared = false;
 		p->relocs[i].handle = r->handle;
 
 		radeon_cs_buckets_add(&buckets, &p->relocs[i].tv.head,
diff --git a/drivers/gpu/drm/radeon/radeon_vm.c b/drivers/gpu/drm/radeon/radeon_vm.c
index 671ee566aa51..1cce4468cd75 100644
--- a/drivers/gpu/drm/radeon/radeon_vm.c
+++ b/drivers/gpu/drm/radeon/radeon_vm.c
@@ -143,6 +143,7 @@ struct radeon_cs_reloc *radeon_vm_get_bos(struct radeon_device *rdev,
 	list[0].prefered_domains = RADEON_GEM_DOMAIN_VRAM;
 	list[0].allowed_domains = RADEON_GEM_DOMAIN_VRAM;
 	list[0].tv.bo = &vm->page_directory->tbo;
+	list[0].tv.shared = false;
 	list[0].tiling_flags = 0;
 	list[0].handle = 0;
 	list_add(&list[0].tv.head, head);
@@ -156,6 +157,7 @@ struct radeon_cs_reloc *radeon_vm_get_bos(struct radeon_device *rdev,
 		list[idx].prefered_domains = RADEON_GEM_DOMAIN_VRAM;
 		list[idx].allowed_domains = RADEON_GEM_DOMAIN_VRAM;
 		list[idx].tv.bo = &list[idx].robj->tbo;
+		list[idx].tv.shared = false;
 		list[idx].tiling_flags = 0;
 		list[idx].handle = 0;
 		list_add(&list[idx++].tv.head, head);
@@ -395,6 +397,7 @@ static int radeon_vm_clear_bo(struct radeon_device *rdev,
 
 	memset(&tv, 0, sizeof(tv));
 	tv.bo = &bo->tbo;
+	tv.shared = false;
 
 	INIT_LIST_HEAD(&head);
 	list_add(&tv.head, &head);
diff --git a/drivers/gpu/drm/ttm/ttm_execbuf_util.c b/drivers/gpu/drm/ttm/ttm_execbuf_util.c
index adafc0f8ec06..8ce508e76208 100644
--- a/drivers/gpu/drm/ttm/ttm_execbuf_util.c
+++ b/drivers/gpu/drm/ttm/ttm_execbuf_util.c
@@ -119,8 +119,14 @@ int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket,
 			ret = -EBUSY;
 		}
 
-		if (!ret)
-			continue;
+		if (!ret) {
+			if (!entry->shared)
+				continue;
+
+			ret = reservation_object_reserve_shared(bo->resv);
+			if (!ret)
+				continue;
+		}
 
 		/* uh oh, we lost out, drop every reservation and try
 		 * to only reserve this buffer, then start over if
@@ -136,6 +142,9 @@ int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket,
 			ret = 0;
 		}
 
+		if (!ret && entry->shared)
+			ret = reservation_object_reserve_shared(bo->resv);
+
 		if (unlikely(ret != 0)) {
 			if (ret == -EINTR)
 				ret = -ERESTARTSYS;
@@ -183,7 +192,10 @@ void ttm_eu_fence_buffer_objects(struct ww_acquire_ctx *ticket,
 
 	list_for_each_entry(entry, list, head) {
 		bo = entry->bo;
-		reservation_object_add_excl_fence(bo->resv, fence);
+		if (entry->shared)
+			reservation_object_add_shared_fence(bo->resv, fence);
+		else
+			reservation_object_add_excl_fence(bo->resv, fence);
 		ttm_bo_add_to_lru(bo);
 		__ttm_bo_unreserve(bo);
 	}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
index 0ceaddc8e4f7..b4de3b2a7cc5 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
@@ -346,6 +346,7 @@ static int vmw_bo_to_validate_list(struct vmw_sw_context *sw_context,
 		++sw_context->cur_val_buf;
 		val_buf = &vval_buf->base;
 		val_buf->bo = ttm_bo_reference(bo);
+		val_buf->shared = false;
 		list_add_tail(&val_buf->head, &sw_context->validate_nodes);
 		vval_buf->validate_as_mob = validate_as_mob;
 	}
@@ -2670,9 +2671,11 @@ void __vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv,
 	INIT_LIST_HEAD(&validate_list);
 
 	pinned_val.bo = ttm_bo_reference(dev_priv->pinned_bo);
+	pinned_val.shared = false;
 	list_add_tail(&pinned_val.head, &validate_list);
 
 	query_val.bo = ttm_bo_reference(dev_priv->dummy_query_bo);
+	query_val.shared = false;
 	list_add_tail(&query_val.head, &validate_list);
 
 	ret = ttm_eu_reserve_buffers(&ticket, &validate_list, false);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
index ff0e03b97753..26584316cb78 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
@@ -133,6 +133,7 @@ static void vmw_resource_release(struct kref *kref)
 			struct ttm_validate_buffer val_buf;
 
 			val_buf.bo = bo;
+			val_buf.shared = false;
 			res->func->unbind(res, false, &val_buf);
 		}
 		res->backup_dirty = false;
@@ -1219,6 +1220,7 @@ vmw_resource_check_buffer(struct vmw_resource *res,
 
 	INIT_LIST_HEAD(&val_list);
 	val_buf->bo = ttm_bo_reference(&res->backup->base);
+	val_buf->shared = false;
 	list_add_tail(&val_buf->head, &val_list);
 	ret = ttm_eu_reserve_buffers(NULL, &val_list, interruptible);
 	if (unlikely(ret != 0))
@@ -1312,6 +1314,7 @@ int vmw_resource_do_evict(struct vmw_resource *res, bool interruptible)
 	BUG_ON(!func->may_evict);
 
 	val_buf.bo = NULL;
+	val_buf.shared = false;
 	ret = vmw_resource_check_buffer(res, interruptible, &val_buf);
 	if (unlikely(ret != 0))
 		return ret;
@@ -1357,6 +1360,7 @@ int vmw_resource_validate(struct vmw_resource *res)
 		return 0;
 
 	val_buf.bo = NULL;
+	val_buf.shared = false;
 	if (res->backup)
 		val_buf.bo = &res->backup->base;
 	do {
@@ -1474,6 +1478,7 @@ void vmw_resource_move_notify(struct ttm_buffer_object *bo,
 		struct ttm_validate_buffer val_buf;
 
 		val_buf.bo = bo;
+		val_buf.shared = false;
 
 		list_for_each_entry_safe(res, n, &dma_buf->res_list, mob_head) {
 
diff --git a/include/drm/ttm/ttm_execbuf_util.h b/include/drm/ttm/ttm_execbuf_util.h
index ff11a424f752..460441714413 100644
--- a/include/drm/ttm/ttm_execbuf_util.h
+++ b/include/drm/ttm/ttm_execbuf_util.h
@@ -39,11 +39,13 @@
  *
  * @head: list head for thread-private list.
  * @bo: refcounted buffer object pointer.
+ * @shared: should the fence be added shared?
  */
 
 struct ttm_validate_buffer {
 	struct list_head head;
 	struct ttm_buffer_object *bo;
+	bool shared;
 };
 
 /**