diff options
Diffstat (limited to 'drivers/gpu/nvgpu/common/linux')
-rw-r--r--  drivers/gpu/nvgpu/common/linux/dma.c       | 60
-rw-r--r--  drivers/gpu/nvgpu/common/linux/nvgpu_mem.c |  2
2 files changed, 32 insertions(+), 30 deletions(-)
diff --git a/drivers/gpu/nvgpu/common/linux/dma.c b/drivers/gpu/nvgpu/common/linux/dma.c
index 2a75ad13..832d0f47 100644
--- a/drivers/gpu/nvgpu/common/linux/dma.c
+++ b/drivers/gpu/nvgpu/common/linux/dma.c
@@ -107,10 +107,10 @@ int nvgpu_dma_alloc_flags_sys(struct gk20a *g, unsigned long flags,
 	nvgpu_dma_flags_to_attrs(&dma_attrs, flags);
 
 	if (flags & NVGPU_DMA_NO_KERNEL_MAPPING) {
-		mem->pages = dma_alloc_attrs(d,
+		mem->priv.pages = dma_alloc_attrs(d,
 				size, &iova, GFP_KERNEL,
 				__DMA_ATTR(dma_attrs));
-		if (!mem->pages)
+		if (!mem->priv.pages)
 			return -ENOMEM;
 	} else {
 		mem->cpu_va = dma_alloc_attrs(d,
@@ -126,10 +126,12 @@ int nvgpu_dma_alloc_flags_sys(struct gk20a *g, unsigned long flags,
 	}
 
 	if (flags & NVGPU_DMA_NO_KERNEL_MAPPING)
-		err = gk20a_get_sgtable_from_pages(d, &mem->sgt, mem->pages,
+		err = gk20a_get_sgtable_from_pages(d, &mem->priv.sgt,
+						   mem->priv.pages,
 					iova, size);
 	else {
-		err = gk20a_get_sgtable(d, &mem->sgt, mem->cpu_va, iova, size);
+		err = gk20a_get_sgtable(d, &mem->priv.sgt, mem->cpu_va,
+					iova, size);
 		memset(mem->cpu_va, 0, size);
 	}
 	if (err)
@@ -137,7 +139,7 @@ int nvgpu_dma_alloc_flags_sys(struct gk20a *g, unsigned long flags,
 
 	mem->size = size;
 	mem->aperture = APERTURE_SYSMEM;
-	mem->flags = flags;
+	mem->priv.flags = flags;
 
 	gk20a_dbg_fn("done");
 
@@ -146,7 +148,7 @@ int nvgpu_dma_alloc_flags_sys(struct gk20a *g, unsigned long flags,
 fail_free:
 	dma_free_coherent(d, size, mem->cpu_va, iova);
 	mem->cpu_va = NULL;
-	mem->sgt = NULL;
+	mem->priv.sgt = NULL;
 	return err;
 }
 
@@ -204,23 +206,23 @@ int nvgpu_dma_alloc_flags_vid_at(struct gk20a *g, unsigned long flags,
 	else
 		mem->fixed = false;
 
-	mem->sgt = nvgpu_kzalloc(g, sizeof(struct sg_table));
-	if (!mem->sgt) {
+	mem->priv.sgt = nvgpu_kzalloc(g, sizeof(struct sg_table));
+	if (!mem->priv.sgt) {
 		err = -ENOMEM;
 		goto fail_physfree;
 	}
 
-	err = sg_alloc_table(mem->sgt, 1, GFP_KERNEL);
+	err = sg_alloc_table(mem->priv.sgt, 1, GFP_KERNEL);
 	if (err)
 		goto fail_kfree;
 
-	set_vidmem_page_alloc(mem->sgt->sgl, addr);
-	sg_set_page(mem->sgt->sgl, NULL, size, 0);
+	set_vidmem_page_alloc(mem->priv.sgt->sgl, addr);
+	sg_set_page(mem->priv.sgt->sgl, NULL, size, 0);
 
 	mem->size = size;
 	mem->aperture = APERTURE_VIDMEM;
 	mem->allocator = vidmem_alloc;
-	mem->flags = flags;
+	mem->priv.flags = flags;
 
 	nvgpu_init_list_node(&mem->clear_list_entry);
 
@@ -229,7 +231,7 @@ int nvgpu_dma_alloc_flags_vid_at(struct gk20a *g, unsigned long flags,
 	return 0;
 
 fail_kfree:
-	nvgpu_kfree(g, mem->sgt);
+	nvgpu_kfree(g, mem->priv.sgt);
fail_physfree:
 	nvgpu_free(&g->mm.vidmem.allocator, addr);
 	return err;
@@ -283,7 +285,7 @@ int nvgpu_dma_alloc_map_flags_sys(struct vm_gk20a *vm, unsigned long flags,
 	if (err)
 		return err;
 
-	mem->gpu_va = gk20a_gmmu_map(vm, &mem->sgt, size, 0,
+	mem->gpu_va = gk20a_gmmu_map(vm, &mem->priv.sgt, size, 0,
 				     gk20a_mem_flag_none, false,
 				     mem->aperture);
 	if (!mem->gpu_va) {
@@ -313,7 +315,7 @@ int nvgpu_dma_alloc_map_flags_vid(struct vm_gk20a *vm, unsigned long flags,
 	if (err)
 		return err;
 
-	mem->gpu_va = gk20a_gmmu_map(vm, &mem->sgt, size, 0,
+	mem->gpu_va = gk20a_gmmu_map(vm, &mem->priv.sgt, size, 0,
 				     gk20a_mem_flag_none, false,
 				     mem->aperture);
 	if (!mem->gpu_va) {
@@ -332,31 +334,31 @@ static void nvgpu_dma_free_sys(struct gk20a *g, struct nvgpu_mem *mem)
 {
 	struct device *d = dev_from_gk20a(g);
 
-	if (mem->cpu_va || mem->pages) {
-		if (mem->flags) {
+	if (mem->cpu_va || mem->priv.pages) {
+		if (mem->priv.flags) {
 			DEFINE_DMA_ATTRS(dma_attrs);
 
-			nvgpu_dma_flags_to_attrs(&dma_attrs, mem->flags);
+			nvgpu_dma_flags_to_attrs(&dma_attrs, mem->priv.flags);
 
-			if (mem->flags & NVGPU_DMA_NO_KERNEL_MAPPING) {
-				dma_free_attrs(d, mem->size, mem->pages,
-					sg_dma_address(mem->sgt->sgl),
+			if (mem->priv.flags & NVGPU_DMA_NO_KERNEL_MAPPING) {
+				dma_free_attrs(d, mem->size, mem->priv.pages,
+					sg_dma_address(mem->priv.sgt->sgl),
 					__DMA_ATTR(dma_attrs));
 			} else {
 				dma_free_attrs(d, mem->size, mem->cpu_va,
-					sg_dma_address(mem->sgt->sgl),
+					sg_dma_address(mem->priv.sgt->sgl),
 					__DMA_ATTR(dma_attrs));
 			}
 		} else {
 			dma_free_coherent(d, mem->size, mem->cpu_va,
-				sg_dma_address(mem->sgt->sgl));
+				sg_dma_address(mem->priv.sgt->sgl));
 		}
 		mem->cpu_va = NULL;
-		mem->pages = NULL;
+		mem->priv.pages = NULL;
 	}
 
-	if (mem->sgt)
-		gk20a_free_sgtable(g, &mem->sgt);
+	if (mem->priv.sgt)
+		gk20a_free_sgtable(g, &mem->priv.sgt);
 
 	mem->size = 0;
 	mem->aperture = APERTURE_INVALID;
@@ -368,7 +370,7 @@ static void nvgpu_dma_free_vid(struct gk20a *g, struct nvgpu_mem *mem)
 	bool was_empty;
 
 	/* Sanity check - only this supported when allocating. */
-	WARN_ON(mem->flags != NVGPU_DMA_NO_KERNEL_MAPPING);
+	WARN_ON(mem->priv.flags != NVGPU_DMA_NO_KERNEL_MAPPING);
 
 	if (mem->user_mem) {
 		nvgpu_mutex_acquire(&g->mm.vidmem.clear_list_mutex);
@@ -385,8 +387,8 @@ static void nvgpu_dma_free_vid(struct gk20a *g, struct nvgpu_mem *mem)
 	} else {
 		nvgpu_memset(g, mem, 0, 0, mem->size);
 		nvgpu_free(mem->allocator,
-			   (u64)get_vidmem_page_alloc(mem->sgt->sgl));
-		gk20a_free_sgtable(g, &mem->sgt);
+			   (u64)get_vidmem_page_alloc(mem->priv.sgt->sgl));
+		gk20a_free_sgtable(g, &mem->priv.sgt);
 
 		mem->size = 0;
 		mem->aperture = APERTURE_INVALID;
diff --git a/drivers/gpu/nvgpu/common/linux/nvgpu_mem.c b/drivers/gpu/nvgpu/common/linux/nvgpu_mem.c
index eb214aad..bb19dd61 100644
--- a/drivers/gpu/nvgpu/common/linux/nvgpu_mem.c
+++ b/drivers/gpu/nvgpu/common/linux/nvgpu_mem.c
@@ -57,7 +57,7 @@ int nvgpu_mem_begin(struct gk20a *g, struct nvgpu_mem *mem)
 		return -EBUSY;
 	}
 
-	cpu_va = vmap(mem->pages,
+	cpu_va = vmap(mem->priv.pages,
 			PAGE_ALIGN(mem->size) >> PAGE_SHIFT,
 			0, pgprot_writecombine(PAGE_KERNEL));
 