aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorThomas Hellstrom <thellstrom@vmware.com>2018-09-26 09:41:52 -0400
committerThomas Hellstrom <thellstrom@vmware.com>2018-09-27 09:21:37 -0400
commit13289241fe8b8c336ec8277b9c4643ea7fbb2f70 (patch)
tree915ef1c2f199dc1c1c285835bc8ebe89c07b44f4
parentd76ce03e1a7870ca6351610cf30bcf62949ea900 (diff)
drm/vmwgfx: Remove the resource avail field
This field was previously used to prevent a lookup of a resource before its constructor had run to its end. This was mainly intended for an interface that is now removed that allowed looking up a resource by its device id. Currently all affected resources are added to the lookup mechanism (its TTM prime object is initialized) late in the constructor where it's OK to look up the resource. This means we can change the device resource_lock to an ordinary spinlock instead of an rwlock and remove a locking sequence during lookup. Signed-off-by: Thomas Hellstrom <thellstrom@vmware.com> Reviewed-by: Sinclair Yeh <syeh@vmware.com> Reviewed-by: Deepak Rawat <drawat@vmware.com>
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_context.c6
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c2
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_drv.c2
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_drv.h44
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_resource.c75
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_resource_priv.h2
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_shader.c4
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_simple_resource.c2
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_so.c2
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_surface.c2
10 files changed, 68 insertions, 73 deletions
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_context.c b/drivers/gpu/drm/vmwgfx/vmwgfx_context.c
index 7c3cb8efd11a..4d502567d24c 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_context.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_context.c
@@ -217,9 +217,7 @@ static int vmw_gb_context_init(struct vmw_private *dev_priv,
217 } 217 }
218 } 218 }
219 219
220 220 res->hw_destroy = vmw_hw_context_destroy;
221
222 vmw_resource_activate(res, vmw_hw_context_destroy);
223 return 0; 221 return 0;
224 222
225out_cotables: 223out_cotables:
@@ -274,7 +272,7 @@ static int vmw_context_init(struct vmw_private *dev_priv,
274 272
275 vmw_fifo_commit(dev_priv, sizeof(*cmd)); 273 vmw_fifo_commit(dev_priv, sizeof(*cmd));
276 vmw_fifo_resource_inc(dev_priv); 274 vmw_fifo_resource_inc(dev_priv);
277 vmw_resource_activate(res, vmw_hw_context_destroy); 275 res->hw_destroy = vmw_hw_context_destroy;
278 return 0; 276 return 0;
279 277
280out_early: 278out_early:
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c b/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c
index 1d45714e1d5a..44f3f6f107d3 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c
@@ -615,7 +615,7 @@ struct vmw_resource *vmw_cotable_alloc(struct vmw_private *dev_priv,
615 vcotbl->type = type; 615 vcotbl->type = type;
616 vcotbl->ctx = ctx; 616 vcotbl->ctx = ctx;
617 617
618 vmw_resource_activate(&vcotbl->res, vmw_hw_cotable_destroy); 618 vcotbl->res.hw_destroy = vmw_hw_cotable_destroy;
619 619
620 return &vcotbl->res; 620 return &vcotbl->res;
621 621
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index d9c178e235b4..61a84b958d67 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -667,8 +667,8 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
667 mutex_init(&dev_priv->binding_mutex); 667 mutex_init(&dev_priv->binding_mutex);
668 mutex_init(&dev_priv->requested_layout_mutex); 668 mutex_init(&dev_priv->requested_layout_mutex);
669 mutex_init(&dev_priv->global_kms_state_mutex); 669 mutex_init(&dev_priv->global_kms_state_mutex);
670 rwlock_init(&dev_priv->resource_lock);
671 ttm_lock_init(&dev_priv->reservation_sem); 670 ttm_lock_init(&dev_priv->reservation_sem);
671 spin_lock_init(&dev_priv->resource_lock);
672 spin_lock_init(&dev_priv->hw_lock); 672 spin_lock_init(&dev_priv->hw_lock);
673 spin_lock_init(&dev_priv->waiter_lock); 673 spin_lock_init(&dev_priv->waiter_lock);
674 spin_lock_init(&dev_priv->cap_lock); 674 spin_lock_init(&dev_priv->cap_lock);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
index 61866294147e..d83bb70627ec 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
@@ -113,21 +113,49 @@ struct vmw_validate_buffer {
113}; 113};
114 114
115struct vmw_res_func; 115struct vmw_res_func;
116
117
118/**
 119 * struct vmw_resource - base class for hardware resources
120 *
121 * @kref: For refcounting.
122 * @dev_priv: Pointer to the device private for this resource. Immutable.
123 * @id: Device id. Protected by @dev_priv::resource_lock.
124 * @backup_size: Backup buffer size. Immutable.
125 * @res_dirty: Resource contains data not yet in the backup buffer. Protected
126 * by resource reserved.
127 * @backup_dirty: Backup buffer contains data not yet in the HW resource.
 128 * Protected by resource reserved.
129 * @backup: The backup buffer if any. Protected by resource reserved.
130 * @backup_offset: Offset into the backup buffer if any. Protected by resource
131 * reserved. Note that only a few resource types can have a @backup_offset
132 * different from zero.
133 * @pin_count: The pin count for this resource. A pinned resource has a
134 * pin-count greater than zero. It is not on the resource LRU lists and its
135 * backup buffer is pinned. Hence it can't be evicted.
136 * @func: Method vtable for this resource. Immutable.
137 * @lru_head: List head for the LRU list. Protected by @dev_priv::resource_lock.
138 * @mob_head: List head for the MOB backup list. Protected by @backup reserved.
139 * @binding_head: List head for the context binding list. Protected by
140 * the @dev_priv::binding_mutex
141 * @res_free: The resource destructor.
142 * @hw_destroy: Callback to destroy the resource on the device, as part of
143 * resource destruction.
144 */
116struct vmw_resource { 145struct vmw_resource {
117 struct kref kref; 146 struct kref kref;
118 struct vmw_private *dev_priv; 147 struct vmw_private *dev_priv;
119 int id; 148 int id;
120 bool avail;
121 unsigned long backup_size; 149 unsigned long backup_size;
122 bool res_dirty; /* Protected by backup buffer reserved */ 150 bool res_dirty;
123 bool backup_dirty; /* Protected by backup buffer reserved */ 151 bool backup_dirty;
124 struct vmw_buffer_object *backup; 152 struct vmw_buffer_object *backup;
125 unsigned long backup_offset; 153 unsigned long backup_offset;
126 unsigned long pin_count; /* Protected by resource reserved */ 154 unsigned long pin_count;
127 const struct vmw_res_func *func; 155 const struct vmw_res_func *func;
128 struct list_head lru_head; /* Protected by the resource lock */ 156 struct list_head lru_head;
129 struct list_head mob_head; /* Protected by @backup reserved */ 157 struct list_head mob_head;
130 struct list_head binding_head; /* Protected by binding_mutex */ 158 struct list_head binding_head;
131 void (*res_free) (struct vmw_resource *res); 159 void (*res_free) (struct vmw_resource *res);
132 void (*hw_destroy) (struct vmw_resource *res); 160 void (*hw_destroy) (struct vmw_resource *res);
133}; 161};
@@ -471,7 +499,7 @@ struct vmw_private {
471 * Context and surface management. 499 * Context and surface management.
472 */ 500 */
473 501
474 rwlock_t resource_lock; 502 spinlock_t resource_lock;
475 struct idr res_idr[vmw_res_max]; 503 struct idr res_idr[vmw_res_max];
476 /* 504 /*
477 * Block lastclose from racing with firstopen. 505 * Block lastclose from racing with firstopen.
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
index 315b3d60567d..55df79eccd57 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
@@ -58,11 +58,11 @@ void vmw_resource_release_id(struct vmw_resource *res)
58 struct vmw_private *dev_priv = res->dev_priv; 58 struct vmw_private *dev_priv = res->dev_priv;
59 struct idr *idr = &dev_priv->res_idr[res->func->res_type]; 59 struct idr *idr = &dev_priv->res_idr[res->func->res_type];
60 60
61 write_lock(&dev_priv->resource_lock); 61 spin_lock(&dev_priv->resource_lock);
62 if (res->id != -1) 62 if (res->id != -1)
63 idr_remove(idr, res->id); 63 idr_remove(idr, res->id);
64 res->id = -1; 64 res->id = -1;
65 write_unlock(&dev_priv->resource_lock); 65 spin_unlock(&dev_priv->resource_lock);
66} 66}
67 67
68static void vmw_resource_release(struct kref *kref) 68static void vmw_resource_release(struct kref *kref)
@@ -73,10 +73,9 @@ static void vmw_resource_release(struct kref *kref)
73 int id; 73 int id;
74 struct idr *idr = &dev_priv->res_idr[res->func->res_type]; 74 struct idr *idr = &dev_priv->res_idr[res->func->res_type];
75 75
76 write_lock(&dev_priv->resource_lock); 76 spin_lock(&dev_priv->resource_lock);
77 res->avail = false;
78 list_del_init(&res->lru_head); 77 list_del_init(&res->lru_head);
79 write_unlock(&dev_priv->resource_lock); 78 spin_unlock(&dev_priv->resource_lock);
80 if (res->backup) { 79 if (res->backup) {
81 struct ttm_buffer_object *bo = &res->backup->base; 80 struct ttm_buffer_object *bo = &res->backup->base;
82 81
@@ -108,10 +107,10 @@ static void vmw_resource_release(struct kref *kref)
108 else 107 else
109 kfree(res); 108 kfree(res);
110 109
111 write_lock(&dev_priv->resource_lock); 110 spin_lock(&dev_priv->resource_lock);
112 if (id != -1) 111 if (id != -1)
113 idr_remove(idr, id); 112 idr_remove(idr, id);
114 write_unlock(&dev_priv->resource_lock); 113 spin_unlock(&dev_priv->resource_lock);
115} 114}
116 115
117void vmw_resource_unreference(struct vmw_resource **p_res) 116void vmw_resource_unreference(struct vmw_resource **p_res)
@@ -140,13 +139,13 @@ int vmw_resource_alloc_id(struct vmw_resource *res)
140 BUG_ON(res->id != -1); 139 BUG_ON(res->id != -1);
141 140
142 idr_preload(GFP_KERNEL); 141 idr_preload(GFP_KERNEL);
143 write_lock(&dev_priv->resource_lock); 142 spin_lock(&dev_priv->resource_lock);
144 143
145 ret = idr_alloc(idr, res, 1, 0, GFP_NOWAIT); 144 ret = idr_alloc(idr, res, 1, 0, GFP_NOWAIT);
146 if (ret >= 0) 145 if (ret >= 0)
147 res->id = ret; 146 res->id = ret;
148 147
149 write_unlock(&dev_priv->resource_lock); 148 spin_unlock(&dev_priv->resource_lock);
150 idr_preload_end(); 149 idr_preload_end();
151 return ret < 0 ? ret : 0; 150 return ret < 0 ? ret : 0;
152} 151}
@@ -170,7 +169,6 @@ int vmw_resource_init(struct vmw_private *dev_priv, struct vmw_resource *res,
170 kref_init(&res->kref); 169 kref_init(&res->kref);
171 res->hw_destroy = NULL; 170 res->hw_destroy = NULL;
172 res->res_free = res_free; 171 res->res_free = res_free;
173 res->avail = false;
174 res->dev_priv = dev_priv; 172 res->dev_priv = dev_priv;
175 res->func = func; 173 res->func = func;
176 INIT_LIST_HEAD(&res->lru_head); 174 INIT_LIST_HEAD(&res->lru_head);
@@ -187,28 +185,6 @@ int vmw_resource_init(struct vmw_private *dev_priv, struct vmw_resource *res,
187 return vmw_resource_alloc_id(res); 185 return vmw_resource_alloc_id(res);
188} 186}
189 187
190/**
191 * vmw_resource_activate
192 *
193 * @res: Pointer to the newly created resource
194 * @hw_destroy: Destroy function. NULL if none.
195 *
196 * Activate a resource after the hardware has been made aware of it.
 197 * Set the destroy function to @destroy. Typically this frees the
198 * resource and destroys the hardware resources associated with it.
199 * Activate basically means that the function vmw_resource_lookup will
200 * find it.
201 */
202void vmw_resource_activate(struct vmw_resource *res,
203 void (*hw_destroy) (struct vmw_resource *))
204{
205 struct vmw_private *dev_priv = res->dev_priv;
206
207 write_lock(&dev_priv->resource_lock);
208 res->avail = true;
209 res->hw_destroy = hw_destroy;
210 write_unlock(&dev_priv->resource_lock);
211}
212 188
213/** 189/**
214 * vmw_user_resource_lookup_handle - lookup a struct resource from a 190 * vmw_user_resource_lookup_handle - lookup a struct resource from a
@@ -243,15 +219,10 @@ int vmw_user_resource_lookup_handle(struct vmw_private *dev_priv,
243 goto out_bad_resource; 219 goto out_bad_resource;
244 220
245 res = converter->base_obj_to_res(base); 221 res = converter->base_obj_to_res(base);
246 222 if (res->res_free != converter->res_free)
247 read_lock(&dev_priv->resource_lock);
248 if (!res->avail || res->res_free != converter->res_free) {
249 read_unlock(&dev_priv->resource_lock);
250 goto out_bad_resource; 223 goto out_bad_resource;
251 }
252 224
253 kref_get(&res->kref); 225 kref_get(&res->kref);
254 read_unlock(&dev_priv->resource_lock);
255 226
256 *p_res = res; 227 *p_res = res;
257 ret = 0; 228 ret = 0;
@@ -422,10 +393,10 @@ void vmw_resource_unreserve(struct vmw_resource *res,
422 if (!res->func->may_evict || res->id == -1 || res->pin_count) 393 if (!res->func->may_evict || res->id == -1 || res->pin_count)
423 return; 394 return;
424 395
425 write_lock(&dev_priv->resource_lock); 396 spin_lock(&dev_priv->resource_lock);
426 list_add_tail(&res->lru_head, 397 list_add_tail(&res->lru_head,
427 &res->dev_priv->res_lru[res->func->res_type]); 398 &res->dev_priv->res_lru[res->func->res_type]);
428 write_unlock(&dev_priv->resource_lock); 399 spin_unlock(&dev_priv->resource_lock);
429} 400}
430 401
431/** 402/**
@@ -504,9 +475,9 @@ int vmw_resource_reserve(struct vmw_resource *res, bool interruptible,
504 struct vmw_private *dev_priv = res->dev_priv; 475 struct vmw_private *dev_priv = res->dev_priv;
505 int ret; 476 int ret;
506 477
507 write_lock(&dev_priv->resource_lock); 478 spin_lock(&dev_priv->resource_lock);
508 list_del_init(&res->lru_head); 479 list_del_init(&res->lru_head);
509 write_unlock(&dev_priv->resource_lock); 480 spin_unlock(&dev_priv->resource_lock);
510 481
511 if (res->func->needs_backup && res->backup == NULL && 482 if (res->func->needs_backup && res->backup == NULL &&
512 !no_backup) { 483 !no_backup) {
@@ -619,12 +590,12 @@ int vmw_resource_validate(struct vmw_resource *res, bool intr)
619 if (likely(ret != -EBUSY)) 590 if (likely(ret != -EBUSY))
620 break; 591 break;
621 592
622 write_lock(&dev_priv->resource_lock); 593 spin_lock(&dev_priv->resource_lock);
623 if (list_empty(lru_list) || !res->func->may_evict) { 594 if (list_empty(lru_list) || !res->func->may_evict) {
624 DRM_ERROR("Out of device device resources " 595 DRM_ERROR("Out of device device resources "
625 "for %s.\n", res->func->type_name); 596 "for %s.\n", res->func->type_name);
626 ret = -EBUSY; 597 ret = -EBUSY;
627 write_unlock(&dev_priv->resource_lock); 598 spin_unlock(&dev_priv->resource_lock);
628 break; 599 break;
629 } 600 }
630 601
@@ -633,14 +604,14 @@ int vmw_resource_validate(struct vmw_resource *res, bool intr)
633 lru_head)); 604 lru_head));
634 list_del_init(&evict_res->lru_head); 605 list_del_init(&evict_res->lru_head);
635 606
636 write_unlock(&dev_priv->resource_lock); 607 spin_unlock(&dev_priv->resource_lock);
637 608
638 /* Trylock backup buffers with a NULL ticket. */ 609 /* Trylock backup buffers with a NULL ticket. */
639 ret = vmw_resource_do_evict(NULL, evict_res, intr); 610 ret = vmw_resource_do_evict(NULL, evict_res, intr);
640 if (unlikely(ret != 0)) { 611 if (unlikely(ret != 0)) {
641 write_lock(&dev_priv->resource_lock); 612 spin_lock(&dev_priv->resource_lock);
642 list_add_tail(&evict_res->lru_head, lru_list); 613 list_add_tail(&evict_res->lru_head, lru_list);
643 write_unlock(&dev_priv->resource_lock); 614 spin_unlock(&dev_priv->resource_lock);
644 if (ret == -ERESTARTSYS || 615 if (ret == -ERESTARTSYS ||
645 ++err_count > VMW_RES_EVICT_ERR_COUNT) { 616 ++err_count > VMW_RES_EVICT_ERR_COUNT) {
646 vmw_resource_unreference(&evict_res); 617 vmw_resource_unreference(&evict_res);
@@ -822,7 +793,7 @@ static void vmw_resource_evict_type(struct vmw_private *dev_priv,
822 struct ww_acquire_ctx ticket; 793 struct ww_acquire_ctx ticket;
823 794
824 do { 795 do {
825 write_lock(&dev_priv->resource_lock); 796 spin_lock(&dev_priv->resource_lock);
826 797
827 if (list_empty(lru_list)) 798 if (list_empty(lru_list))
828 goto out_unlock; 799 goto out_unlock;
@@ -831,14 +802,14 @@ static void vmw_resource_evict_type(struct vmw_private *dev_priv,
831 list_first_entry(lru_list, struct vmw_resource, 802 list_first_entry(lru_list, struct vmw_resource,
832 lru_head)); 803 lru_head));
833 list_del_init(&evict_res->lru_head); 804 list_del_init(&evict_res->lru_head);
834 write_unlock(&dev_priv->resource_lock); 805 spin_unlock(&dev_priv->resource_lock);
835 806
836 /* Wait lock backup buffers with a ticket. */ 807 /* Wait lock backup buffers with a ticket. */
837 ret = vmw_resource_do_evict(&ticket, evict_res, false); 808 ret = vmw_resource_do_evict(&ticket, evict_res, false);
838 if (unlikely(ret != 0)) { 809 if (unlikely(ret != 0)) {
839 write_lock(&dev_priv->resource_lock); 810 spin_lock(&dev_priv->resource_lock);
840 list_add_tail(&evict_res->lru_head, lru_list); 811 list_add_tail(&evict_res->lru_head, lru_list);
841 write_unlock(&dev_priv->resource_lock); 812 spin_unlock(&dev_priv->resource_lock);
842 if (++err_count > VMW_RES_EVICT_ERR_COUNT) { 813 if (++err_count > VMW_RES_EVICT_ERR_COUNT) {
843 vmw_resource_unreference(&evict_res); 814 vmw_resource_unreference(&evict_res);
844 return; 815 return;
@@ -849,7 +820,7 @@ static void vmw_resource_evict_type(struct vmw_private *dev_priv,
849 } while (1); 820 } while (1);
850 821
851out_unlock: 822out_unlock:
852 write_unlock(&dev_priv->resource_lock); 823 spin_unlock(&dev_priv->resource_lock);
853} 824}
854 825
855/** 826/**
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource_priv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_resource_priv.h
index a8c1c5ebd71d..645370868296 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource_priv.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource_priv.h
@@ -120,8 +120,6 @@ int vmw_resource_init(struct vmw_private *dev_priv, struct vmw_resource *res,
120 bool delay_id, 120 bool delay_id,
121 void (*res_free) (struct vmw_resource *res), 121 void (*res_free) (struct vmw_resource *res),
122 const struct vmw_res_func *func); 122 const struct vmw_res_func *func);
123void vmw_resource_activate(struct vmw_resource *res,
124 void (*hw_destroy) (struct vmw_resource *));
125int 123int
126vmw_simple_resource_create_ioctl(struct drm_device *dev, 124vmw_simple_resource_create_ioctl(struct drm_device *dev,
127 void *data, 125 void *data,
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c b/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
index e03431aef3d0..c72b4351176a 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
@@ -186,7 +186,7 @@ static int vmw_gb_shader_init(struct vmw_private *dev_priv,
186 shader->num_input_sig = num_input_sig; 186 shader->num_input_sig = num_input_sig;
187 shader->num_output_sig = num_output_sig; 187 shader->num_output_sig = num_output_sig;
188 188
189 vmw_resource_activate(res, vmw_hw_shader_destroy); 189 res->hw_destroy = vmw_hw_shader_destroy;
190 return 0; 190 return 0;
191} 191}
192 192
@@ -656,7 +656,7 @@ int vmw_dx_shader_add(struct vmw_cmdbuf_res_manager *man,
656 goto out_resource_init; 656 goto out_resource_init;
657 657
658 res->id = shader->id; 658 res->id = shader->id;
659 vmw_resource_activate(res, vmw_hw_shader_destroy); 659 res->hw_destroy = vmw_hw_shader_destroy;
660 660
661out_resource_init: 661out_resource_init:
662 vmw_resource_unreference(&res); 662 vmw_resource_unreference(&res);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_simple_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_simple_resource.c
index 6ebc5affde14..3bd60f7a9d6d 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_simple_resource.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_simple_resource.c
@@ -81,7 +81,7 @@ static int vmw_simple_resource_init(struct vmw_private *dev_priv,
81 return ret; 81 return ret;
82 } 82 }
83 83
84 vmw_resource_activate(&simple->res, simple->func->hw_destroy); 84 simple->res.hw_destroy = simple->func->hw_destroy;
85 85
86 return 0; 86 return 0;
87} 87}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_so.c b/drivers/gpu/drm/vmwgfx/vmwgfx_so.c
index a01de4845eb7..aaabb87ac3af 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_so.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_so.c
@@ -386,7 +386,7 @@ int vmw_view_add(struct vmw_cmdbuf_res_manager *man,
386 goto out_resource_init; 386 goto out_resource_init;
387 387
388 res->id = view->view_id; 388 res->id = view->view_id;
389 vmw_resource_activate(res, vmw_hw_view_destroy); 389 res->hw_destroy = vmw_hw_view_destroy;
390 390
391out_resource_init: 391out_resource_init:
392 vmw_resource_unreference(&res); 392 vmw_resource_unreference(&res);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
index e125233e074b..bd4cf995089c 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
@@ -614,7 +614,7 @@ static int vmw_surface_init(struct vmw_private *dev_priv,
614 */ 614 */
615 615
616 INIT_LIST_HEAD(&srf->view_list); 616 INIT_LIST_HEAD(&srf->view_list);
617 vmw_resource_activate(res, vmw_hw_surface_destroy); 617 res->hw_destroy = vmw_hw_surface_destroy;
618 return ret; 618 return ret;
619} 619}
620 620