author		Konsta Holtta <kholtta@nvidia.com>	2015-03-05 06:18:30 -0500
committer	Dan Willemsen <dwillemsen@nvidia.com>	2015-04-04 21:57:23 -0400
commit		5f6cc1289e4282ac034bd97a67a86e05b82915d0 (patch)
tree		eb3aafce96250a8a13e297b4c721588f28e743b1 /drivers/gpu/nvgpu/gk20a/mm_gk20a.c
parent		325e0587d9180b05d59869679fc06b0ba979d973 (diff)
Revert "gpu: nvgpu: cache cde compbits buf mappings"
This reverts commit 9968badd26490a9d399f526fc57a9defd161dd6c. The commit accidentally introduced some memory leaks. Change-Id: I00d8d4452a152a8a2fe2d90fb949cdfee0de4c69 Signed-off-by: Konsta Holtta <kholtta@nvidia.com> Reviewed-on: http://git-master/r/714288 Reviewed-by: Juha Tukkinen <jtukkinen@nvidia.com>
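The reverted scheme, visible in the hunks below, kept a per-dmabuf map_count
next to a cached CDE compbits mapping (cde_vm/cde_map_vaddr) and released the
cached mapping only when map_count dropped to zero. A minimal, self-contained
C sketch of that pattern follows; all names in it are hypothetical stand-ins,
and the failure mode it demonstrates (an unmap path that skips the decrement
helper, so the count never reaches zero and the cached mapping is never
released) is one plausible source of the leaks mentioned above, not a
confirmed root cause.

#include <stdio.h>

/* Hypothetical model of the reverted caching scheme: a per-buffer
 * map_count gates release of a cached mapping address. */
struct buf_priv {
	int map_count;        /* outstanding user mappings */
	unsigned long cached; /* cached GPU vaddr; 0 means none */
};

static void inc_maps(struct buf_priv *p)
{
	p->map_count++;
}

/* On the last unmap, hand the cached address back to the caller,
 * which unmaps it outside any lock. */
static void dec_maps(struct buf_priv *p, unsigned long *to_unmap)
{
	*to_unmap = 0;
	if (--p->map_count == 0) {
		*to_unmap = p->cached;
		p->cached = 0;
	}
}

int main(void)
{
	struct buf_priv p = { 0, 0xcde000 };
	unsigned long v;

	inc_maps(&p);
	inc_maps(&p);
	dec_maps(&p, &v); /* v == 0: one mapping still outstanding */

	/* If the second unmap goes through a teardown path that never
	 * calls dec_maps(), map_count stays above zero and p.cached is
	 * never released -- the leak pattern the revert removes. */
	printf("map_count=%d cached=%#lx (leaked if nonzero)\n",
	       p.map_count, p.cached);
	return 0;
}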
Diffstat (limited to 'drivers/gpu/nvgpu/gk20a/mm_gk20a.c')
-rw-r--r--  drivers/gpu/nvgpu/gk20a/mm_gk20a.c  71
1 file changed, 1 insertion(+), 70 deletions(-)
diff --git a/drivers/gpu/nvgpu/gk20a/mm_gk20a.c b/drivers/gpu/nvgpu/gk20a/mm_gk20a.c
index 77f71bc9..67680c4c 100644
--- a/drivers/gpu/nvgpu/gk20a/mm_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/mm_gk20a.c
@@ -117,11 +117,6 @@ struct gk20a_dmabuf_priv {
 	int pin_count;
 
 	struct list_head states;
-
-	/* cached cde compbits buf */
-	struct vm_gk20a *cde_vm;
-	u64 cde_map_vaddr;
-	int map_count;
 };
 
 static void gk20a_vm_remove_support_nofree(struct vm_gk20a *vm);
@@ -203,60 +198,6 @@ void gk20a_mm_unpin(struct device *dev, struct dma_buf *dmabuf,
 	mutex_unlock(&priv->lock);
 }
 
-/* CDE compbits buf caching: keep compbit buffer mapped during user mappings.
- * Call these four only after dma_buf has a drvdata allocated */
-
-u64 gk20a_vm_cde_mapped(struct vm_gk20a *vm, struct dma_buf *dmabuf)
-{
-	struct device *dev = dev_from_vm(vm);
-	struct gk20a_dmabuf_priv *priv = dma_buf_get_drvdata(dmabuf, dev);
-	u64 map_vaddr;
-
-	mutex_lock(&priv->lock);
-	map_vaddr = priv->cde_map_vaddr;
-	mutex_unlock(&priv->lock);
-
-	return map_vaddr;
-}
-
-void gk20a_vm_mark_cde_mapped(struct vm_gk20a *vm, struct dma_buf *dmabuf,
-			      u64 map_vaddr)
-{
-	struct device *dev = dev_from_vm(vm);
-	struct gk20a_dmabuf_priv *priv = dma_buf_get_drvdata(dmabuf, dev);
-
-	mutex_lock(&priv->lock);
-	priv->cde_vm = vm;
-	priv->cde_map_vaddr = map_vaddr;
-	mutex_unlock(&priv->lock);
-}
-
-static void gk20a_vm_inc_maps(struct vm_gk20a *vm, struct dma_buf *dmabuf)
-{
-	struct device *dev = dev_from_vm(vm);
-	struct gk20a_dmabuf_priv *priv = dma_buf_get_drvdata(dmabuf, dev);
-
-	mutex_lock(&priv->lock);
-	priv->map_count++;
-	mutex_unlock(&priv->lock);
-}
-
-static void gk20a_vm_dec_maps(struct vm_gk20a *vm, struct dma_buf *dmabuf,
-			      struct vm_gk20a **cde_vm, u64 *cde_map_vaddr)
-{
-	struct device *dev = dev_from_vm(vm);
-	struct gk20a_dmabuf_priv *priv = dma_buf_get_drvdata(dmabuf, dev);
-
-	mutex_lock(&priv->lock);
-	if (--priv->map_count == 0) {
-		*cde_vm = priv->cde_vm;
-		*cde_map_vaddr = priv->cde_map_vaddr;
-		priv->cde_vm = NULL;
-		priv->cde_map_vaddr = 0;
-	}
-	mutex_unlock(&priv->lock);
-}
-
 void gk20a_get_comptags(struct device *dev, struct dma_buf *dmabuf,
 			struct gk20a_comptags *comptags)
 {
@@ -809,8 +750,6 @@ static void gk20a_vm_unmap_user(struct vm_gk20a *vm, u64 offset)
 	struct device *d = dev_from_vm(vm);
 	int retries;
 	struct mapped_buffer_node *mapped_buffer;
-	struct vm_gk20a *cde_vm = NULL;
-	u64 cde_map_vaddr = 0;
 
 	mutex_lock(&vm->update_gmmu_lock);
 
@@ -843,15 +782,9 @@ static void gk20a_vm_unmap_user(struct vm_gk20a *vm, u64 offset)
 	mapped_buffer->user_mapped--;
 	if (mapped_buffer->user_mapped == 0)
 		vm->num_user_mapped_buffers--;
-
-	gk20a_vm_dec_maps(vm, mapped_buffer->dmabuf, &cde_vm, &cde_map_vaddr);
-
 	kref_put(&mapped_buffer->ref, gk20a_vm_unmap_locked_kref);
 
 	mutex_unlock(&vm->update_gmmu_lock);
-
-	if (cde_map_vaddr)
-		gk20a_vm_unmap(cde_vm, cde_map_vaddr);
 }
 
 u64 gk20a_vm_alloc_va(struct vm_gk20a *vm,
@@ -2665,9 +2598,7 @@ int gk20a_vm_map_buffer(struct vm_gk20a *vm,
 				mapping_size);
 
 	*offset_align = ret_va;
-	if (ret_va) {
-		gk20a_vm_inc_maps(vm, dmabuf);
-	} else {
+	if (!ret_va) {
 		dma_buf_put(dmabuf);
 		err = -EINVAL;
 	}