diff options
author | Thomas Hellstrom <thellstrom@vmware.com> | 2015-08-10 13:39:35 -0400 |
---|---|---|
committer | Thomas Hellstrom <thellstrom@vmware.com> | 2015-08-12 13:06:32 -0400 |
commit | d80efd5cb3dec16a8d1aea9b8a4a7921972dba65 (patch) | |
tree | 7330bd6473aff84e61ebf2f89f629abab3acd3a6 /drivers/gpu/drm/vmwgfx/vmwgfx_resource.c | |
parent | 8ce75f8ab9044fe11caaaf2b2c82471023212f9f (diff) |
drm/vmwgfx: Initial DX support
Initial DX support.
Co-authored with Sinclair Yeh, Charmaine Lee and Jakob Bornecrantz.
Signed-off-by: Thomas Hellstrom <thellstrom@vmware.com>
Signed-off-by: Sinclair Yeh <syeh@vmware.com>
Signed-off-by: Charmaine Lee <charmainel@vmware.com>
Diffstat (limited to 'drivers/gpu/drm/vmwgfx/vmwgfx_resource.c')
-rw-r--r-- | drivers/gpu/drm/vmwgfx/vmwgfx_resource.c | 48 |
1 file changed, 34 insertions, 14 deletions
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c index be2809aaa7cb..6186e859dab0 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c | |||
@@ -31,6 +31,7 @@ | |||
31 | #include <drm/ttm/ttm_placement.h> | 31 | #include <drm/ttm/ttm_placement.h> |
32 | #include <drm/drmP.h> | 32 | #include <drm/drmP.h> |
33 | #include "vmwgfx_resource_priv.h" | 33 | #include "vmwgfx_resource_priv.h" |
34 | #include "vmwgfx_binding.h" | ||
34 | 35 | ||
35 | #define VMW_RES_EVICT_ERR_COUNT 10 | 36 | #define VMW_RES_EVICT_ERR_COUNT 10 |
36 | 37 | ||
@@ -144,10 +145,10 @@ static void vmw_resource_release(struct kref *kref) | |||
144 | } | 145 | } |
145 | 146 | ||
146 | if (likely(res->hw_destroy != NULL)) { | 147 | if (likely(res->hw_destroy != NULL)) { |
147 | res->hw_destroy(res); | ||
148 | mutex_lock(&dev_priv->binding_mutex); | 148 | mutex_lock(&dev_priv->binding_mutex); |
149 | vmw_context_binding_res_list_kill(&res->binding_head); | 149 | vmw_binding_res_list_kill(&res->binding_head); |
150 | mutex_unlock(&dev_priv->binding_mutex); | 150 | mutex_unlock(&dev_priv->binding_mutex); |
151 | res->hw_destroy(res); | ||
151 | } | 152 | } |
152 | 153 | ||
153 | id = res->id; | 154 | id = res->id; |
@@ -1149,14 +1150,16 @@ out_bind_failed: | |||
1149 | * command submission. | 1150 | * command submission. |
1150 | * | 1151 | * |
1151 | * @res: Pointer to the struct vmw_resource to unreserve. | 1152 | * @res: Pointer to the struct vmw_resource to unreserve. |
1153 | * @switch_backup: Backup buffer has been switched. | ||
1152 | * @new_backup: Pointer to new backup buffer if command submission | 1154 | * @new_backup: Pointer to new backup buffer if command submission |
1153 | * switched. | 1155 | * switched. May be NULL. |
1154 | * @new_backup_offset: New backup offset if @new_backup is !NULL. | 1156 | * @new_backup_offset: New backup offset if @switch_backup is true. |
1155 | * | 1157 | * |
1156 | * Currently unreserving a resource means putting it back on the device's | 1158 | * Currently unreserving a resource means putting it back on the device's |
1157 | * resource lru list, so that it can be evicted if necessary. | 1159 | * resource lru list, so that it can be evicted if necessary. |
1158 | */ | 1160 | */ |
1159 | void vmw_resource_unreserve(struct vmw_resource *res, | 1161 | void vmw_resource_unreserve(struct vmw_resource *res, |
1162 | bool switch_backup, | ||
1160 | struct vmw_dma_buffer *new_backup, | 1163 | struct vmw_dma_buffer *new_backup, |
1161 | unsigned long new_backup_offset) | 1164 | unsigned long new_backup_offset) |
1162 | { | 1165 | { |
@@ -1165,19 +1168,22 @@ void vmw_resource_unreserve(struct vmw_resource *res, | |||
1165 | if (!list_empty(&res->lru_head)) | 1168 | if (!list_empty(&res->lru_head)) |
1166 | return; | 1169 | return; |
1167 | 1170 | ||
1168 | if (new_backup && new_backup != res->backup) { | 1171 | if (switch_backup && new_backup != res->backup) { |
1169 | |||
1170 | if (res->backup) { | 1172 | if (res->backup) { |
1171 | lockdep_assert_held(&res->backup->base.resv->lock.base); | 1173 | lockdep_assert_held(&res->backup->base.resv->lock.base); |
1172 | list_del_init(&res->mob_head); | 1174 | list_del_init(&res->mob_head); |
1173 | vmw_dmabuf_unreference(&res->backup); | 1175 | vmw_dmabuf_unreference(&res->backup); |
1174 | } | 1176 | } |
1175 | 1177 | ||
1176 | res->backup = vmw_dmabuf_reference(new_backup); | 1178 | if (new_backup) { |
1177 | lockdep_assert_held(&new_backup->base.resv->lock.base); | 1179 | res->backup = vmw_dmabuf_reference(new_backup); |
1178 | list_add_tail(&res->mob_head, &new_backup->res_list); | 1180 | lockdep_assert_held(&new_backup->base.resv->lock.base); |
1181 | list_add_tail(&res->mob_head, &new_backup->res_list); | ||
1182 | } else { | ||
1183 | res->backup = NULL; | ||
1184 | } | ||
1179 | } | 1185 | } |
1180 | if (new_backup) | 1186 | if (switch_backup) |
1181 | res->backup_offset = new_backup_offset; | 1187 | res->backup_offset = new_backup_offset; |
1182 | 1188 | ||
1183 | if (!res->func->may_evict || res->id == -1 || res->pin_count) | 1189 | if (!res->func->may_evict || res->id == -1 || res->pin_count) |
@@ -1269,8 +1275,12 @@ int vmw_resource_reserve(struct vmw_resource *res, bool interruptible, | |||
1269 | if (res->func->needs_backup && res->backup == NULL && | 1275 | if (res->func->needs_backup && res->backup == NULL && |
1270 | !no_backup) { | 1276 | !no_backup) { |
1271 | ret = vmw_resource_buf_alloc(res, interruptible); | 1277 | ret = vmw_resource_buf_alloc(res, interruptible); |
1272 | if (unlikely(ret != 0)) | 1278 | if (unlikely(ret != 0)) { |
1279 | DRM_ERROR("Failed to allocate a backup buffer " | ||
1280 | "of size %lu. bytes\n", | ||
1281 | (unsigned long) res->backup_size); | ||
1273 | return ret; | 1282 | return ret; |
1283 | } | ||
1274 | } | 1284 | } |
1275 | 1285 | ||
1276 | return 0; | 1286 | return 0; |
@@ -1354,7 +1364,7 @@ int vmw_resource_validate(struct vmw_resource *res) | |||
1354 | struct ttm_validate_buffer val_buf; | 1364 | struct ttm_validate_buffer val_buf; |
1355 | unsigned err_count = 0; | 1365 | unsigned err_count = 0; |
1356 | 1366 | ||
1357 | if (likely(!res->func->may_evict)) | 1367 | if (!res->func->create) |
1358 | return 0; | 1368 | return 0; |
1359 | 1369 | ||
1360 | val_buf.bo = NULL; | 1370 | val_buf.bo = NULL; |
@@ -1624,7 +1634,7 @@ int vmw_resource_pin(struct vmw_resource *res, bool interruptible) | |||
1624 | res->pin_count++; | 1634 | res->pin_count++; |
1625 | 1635 | ||
1626 | out_no_validate: | 1636 | out_no_validate: |
1627 | vmw_resource_unreserve(res, NULL, 0UL); | 1637 | vmw_resource_unreserve(res, false, NULL, 0UL); |
1628 | out_no_reserve: | 1638 | out_no_reserve: |
1629 | mutex_unlock(&dev_priv->cmdbuf_mutex); | 1639 | mutex_unlock(&dev_priv->cmdbuf_mutex); |
1630 | ttm_write_unlock(&dev_priv->reservation_sem); | 1640 | ttm_write_unlock(&dev_priv->reservation_sem); |
@@ -1660,8 +1670,18 @@ void vmw_resource_unpin(struct vmw_resource *res) | |||
1660 | ttm_bo_unreserve(&vbo->base); | 1670 | ttm_bo_unreserve(&vbo->base); |
1661 | } | 1671 | } |
1662 | 1672 | ||
1663 | vmw_resource_unreserve(res, NULL, 0UL); | 1673 | vmw_resource_unreserve(res, false, NULL, 0UL); |
1664 | 1674 | ||
1665 | mutex_unlock(&dev_priv->cmdbuf_mutex); | 1675 | mutex_unlock(&dev_priv->cmdbuf_mutex); |
1666 | ttm_read_unlock(&dev_priv->reservation_sem); | 1676 | ttm_read_unlock(&dev_priv->reservation_sem); |
1667 | } | 1677 | } |
1678 | |||
1679 | /** | ||
1680 | * vmw_res_type - Return the resource type | ||
1681 | * | ||
1682 | * @res: Pointer to the resource | ||
1683 | */ | ||
1684 | enum vmw_res_type vmw_res_type(const struct vmw_resource *res) | ||
1685 | { | ||
1686 | return res->func->res_type; | ||
1687 | } | ||