Diffstat (limited to 'drivers/gpu/nvgpu/gk20a/mm_gk20a.c')
-rw-r--r--  drivers/gpu/nvgpu/gk20a/mm_gk20a.c | 71 +-
1 file changed, 1 insertion(+), 70 deletions(-)
diff --git a/drivers/gpu/nvgpu/gk20a/mm_gk20a.c b/drivers/gpu/nvgpu/gk20a/mm_gk20a.c
index 77f71bc9..67680c4c 100644
--- a/drivers/gpu/nvgpu/gk20a/mm_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/mm_gk20a.c
@@ -117,11 +117,6 @@ struct gk20a_dmabuf_priv {
 	int pin_count;
 
 	struct list_head states;
-
-	/* cached cde compbits buf */
-	struct vm_gk20a *cde_vm;
-	u64 cde_map_vaddr;
-	int map_count;
 };
 
 static void gk20a_vm_remove_support_nofree(struct vm_gk20a *vm);
@@ -203,60 +198,6 @@ void gk20a_mm_unpin(struct device *dev, struct dma_buf *dmabuf,
 	mutex_unlock(&priv->lock);
 }
 
-/* CDE compbits buf caching: keep compbit buffer mapped during user mappings.
- * Call these four only after dma_buf has a drvdata allocated */
-
-u64 gk20a_vm_cde_mapped(struct vm_gk20a *vm, struct dma_buf *dmabuf)
-{
-	struct device *dev = dev_from_vm(vm);
-	struct gk20a_dmabuf_priv *priv = dma_buf_get_drvdata(dmabuf, dev);
-	u64 map_vaddr;
-
-	mutex_lock(&priv->lock);
-	map_vaddr = priv->cde_map_vaddr;
-	mutex_unlock(&priv->lock);
-
-	return map_vaddr;
-}
-
-void gk20a_vm_mark_cde_mapped(struct vm_gk20a *vm, struct dma_buf *dmabuf,
-			      u64 map_vaddr)
-{
-	struct device *dev = dev_from_vm(vm);
-	struct gk20a_dmabuf_priv *priv = dma_buf_get_drvdata(dmabuf, dev);
-
-	mutex_lock(&priv->lock);
-	priv->cde_vm = vm;
-	priv->cde_map_vaddr = map_vaddr;
-	mutex_unlock(&priv->lock);
-}
-
-static void gk20a_vm_inc_maps(struct vm_gk20a *vm, struct dma_buf *dmabuf)
-{
-	struct device *dev = dev_from_vm(vm);
-	struct gk20a_dmabuf_priv *priv = dma_buf_get_drvdata(dmabuf, dev);
-
-	mutex_lock(&priv->lock);
-	priv->map_count++;
-	mutex_unlock(&priv->lock);
-}
-
-static void gk20a_vm_dec_maps(struct vm_gk20a *vm, struct dma_buf *dmabuf,
-			      struct vm_gk20a **cde_vm, u64 *cde_map_vaddr)
-{
-	struct device *dev = dev_from_vm(vm);
-	struct gk20a_dmabuf_priv *priv = dma_buf_get_drvdata(dmabuf, dev);
-
-	mutex_lock(&priv->lock);
-	if (--priv->map_count == 0) {
-		*cde_vm = priv->cde_vm;
-		*cde_map_vaddr = priv->cde_map_vaddr;
-		priv->cde_vm = NULL;
-		priv->cde_map_vaddr = 0;
-	}
-	mutex_unlock(&priv->lock);
-}
-
 void gk20a_get_comptags(struct device *dev, struct dma_buf *dmabuf,
 			struct gk20a_comptags *comptags)
 {
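
The four helpers deleted above implemented a small per-dmabuf cache with last-unmap teardown. The notable shape is in gk20a_vm_dec_maps(): the counter is decremented under priv->lock, but the cached mapping is only handed back to the caller through out-parameters; the actual unmap runs later, outside the lock. A minimal userspace model of that handoff pattern, with illustrative names throughout (this is not nvgpu code):

#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

struct priv {
	pthread_mutex_t lock;
	int map_count;        /* number of live user mappings */
	uint64_t cached_addr; /* cached mapping torn down on last unmap, 0 if none */
};

/*
 * Decrement under the lock; if this was the last mapping, hand the cached
 * address back so the caller can unmap it after all locks are dropped.
 */
static uint64_t dec_maps(struct priv *p)
{
	uint64_t deferred = 0;

	pthread_mutex_lock(&p->lock);
	if (--p->map_count == 0) {
		deferred = p->cached_addr;
		p->cached_addr = 0;
	}
	pthread_mutex_unlock(&p->lock);

	return deferred;
}

int main(void)
{
	struct priv p = { PTHREAD_MUTEX_INITIALIZER, 1, 0xcde000 };
	uint64_t addr = dec_maps(&p);

	if (addr)
		printf("deferred unmap of %#llx\n", (unsigned long long)addr);
	return 0;
}

The point of the out-parameter dance is that the unmap itself may take other locks, so it must not run while priv->lock is held.
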
@@ -809,8 +750,6 @@ static void gk20a_vm_unmap_user(struct vm_gk20a *vm, u64 offset)
 	struct device *d = dev_from_vm(vm);
 	int retries;
 	struct mapped_buffer_node *mapped_buffer;
-	struct vm_gk20a *cde_vm = NULL;
-	u64 cde_map_vaddr = 0;
 
 	mutex_lock(&vm->update_gmmu_lock);
 
@@ -843,15 +782,9 @@ static void gk20a_vm_unmap_user(struct vm_gk20a *vm, u64 offset)
 	mapped_buffer->user_mapped--;
 	if (mapped_buffer->user_mapped == 0)
 		vm->num_user_mapped_buffers--;
-
-	gk20a_vm_dec_maps(vm, mapped_buffer->dmabuf, &cde_vm, &cde_map_vaddr);
-
 	kref_put(&mapped_buffer->ref, gk20a_vm_unmap_locked_kref);
 
 	mutex_unlock(&vm->update_gmmu_lock);
-
-	if (cde_map_vaddr)
-		gk20a_vm_unmap(cde_vm, cde_map_vaddr);
 }
 
 u64 gk20a_vm_alloc_va(struct vm_gk20a *vm,
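
This hunk shows the caller side of that handoff: gk20a_vm_unmap_user() did its bookkeeping under vm->update_gmmu_lock and only unmapped the cached CDE mapping after mutex_unlock(), presumably so that gk20a_vm_unmap() can take the CDE VM's own locks without nesting under the user VM's lock. The kref_put() that remains follows the usual last-reference-runs-release idiom; a compilable model of it, using illustrative names rather than the kernel's kref API:

#include <stdatomic.h>
#include <stdio.h>

struct obj {
	atomic_int refcount;
};

static void release(struct obj *o)
{
	(void)o;
	printf("last reference dropped; tearing down mapping state\n");
}

/* Mirrors the kref_put() contract: run release() when the count hits zero. */
static void obj_put(struct obj *o, void (*rel)(struct obj *))
{
	/* atomic_fetch_sub returns the previous value */
	if (atomic_fetch_sub(&o->refcount, 1) == 1)
		rel(o);
}

int main(void)
{
	struct obj o = { 2 };

	obj_put(&o, release); /* another holder remains: no release */
	obj_put(&o, release); /* final put: release() runs */
	return 0;
}
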
@@ -2665,9 +2598,7 @@ int gk20a_vm_map_buffer(struct vm_gk20a *vm,
 			mapping_size);
 
 	*offset_align = ret_va;
-	if (ret_va) {
-		gk20a_vm_inc_maps(vm, dmabuf);
-	} else {
+	if (!ret_va) {
 		dma_buf_put(dmabuf);
 		err = -EINVAL;
 	}
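
With gk20a_vm_inc_maps() gone, the remaining error path is pure reference balancing: gk20a_vm_map_buffer() holds a dma_buf reference taken when the fd was looked up earlier in the function (not shown in this diff), so a failed map (ret_va == 0) must drop it before returning -EINVAL. A hedged sketch of that shape, where do_map() is a hypothetical stand-in for the real mapping call and the function is not the actual nvgpu code:

#include <linux/dma-buf.h>
#include <linux/err.h>
#include <linux/errno.h>

/* Hypothetical stand-in for the real mapping call; returns 0 on failure. */
static u64 do_map(struct dma_buf *dmabuf)
{
	(void)dmabuf;
	return 0;
}

static int map_buffer_sketch(int dmabuf_fd, u64 *offset_align)
{
	struct dma_buf *dmabuf;
	u64 ret_va;

	dmabuf = dma_buf_get(dmabuf_fd);	/* takes a dma_buf reference */
	if (IS_ERR(dmabuf))
		return PTR_ERR(dmabuf);

	ret_va = do_map(dmabuf);

	*offset_align = ret_va;
	if (!ret_va) {
		dma_buf_put(dmabuf);	/* failed map: balance dma_buf_get() */
		return -EINVAL;
	}
	return 0;
}
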