diff options
Diffstat (limited to 'drivers/gpu/nvgpu/common')
-rw-r--r-- | drivers/gpu/nvgpu/common/linux/dma.c | 3 | ||||
-rw-r--r-- | drivers/gpu/nvgpu/common/linux/nvgpu_mem.c | 70 |
2 files changed, 72 insertions, 1 deletion
diff --git a/drivers/gpu/nvgpu/common/linux/dma.c b/drivers/gpu/nvgpu/common/linux/dma.c index 832d0f47..7453fdef 100644 --- a/drivers/gpu/nvgpu/common/linux/dma.c +++ b/drivers/gpu/nvgpu/common/linux/dma.c | |||
@@ -334,7 +334,8 @@ static void nvgpu_dma_free_sys(struct gk20a *g, struct nvgpu_mem *mem) | |||
334 | { | 334 | { |
335 | struct device *d = dev_from_gk20a(g); | 335 | struct device *d = dev_from_gk20a(g); |
336 | 336 | ||
337 | if (mem->cpu_va || mem->priv.pages) { | 337 | if (!(mem->mem_flags & NVGPU_MEM_FLAG_SHADOW_COPY) && |
338 | (mem->cpu_va || mem->priv.pages)) { | ||
338 | if (mem->priv.flags) { | 339 | if (mem->priv.flags) { |
339 | DEFINE_DMA_ATTRS(dma_attrs); | 340 | DEFINE_DMA_ATTRS(dma_attrs); |
340 | 341 | ||
diff --git a/drivers/gpu/nvgpu/common/linux/nvgpu_mem.c b/drivers/gpu/nvgpu/common/linux/nvgpu_mem.c index bb19dd61..fb7ee7fe 100644 --- a/drivers/gpu/nvgpu/common/linux/nvgpu_mem.c +++ b/drivers/gpu/nvgpu/common/linux/nvgpu_mem.c | |||
@@ -14,6 +14,7 @@ | |||
14 | * along with this program. If not, see <http://www.gnu.org/licenses/>. | 14 | * along with this program. If not, see <http://www.gnu.org/licenses/>. |
15 | */ | 15 | */ |
16 | 16 | ||
17 | #include <nvgpu/dma.h> | ||
17 | #include <nvgpu/nvgpu_mem.h> | 18 | #include <nvgpu/nvgpu_mem.h> |
18 | #include <nvgpu/page_allocator.h> | 19 | #include <nvgpu/page_allocator.h> |
19 | #include <nvgpu/log.h> | 20 | #include <nvgpu/log.h> |
@@ -52,6 +53,14 @@ int nvgpu_mem_begin(struct gk20a *g, struct nvgpu_mem *mem) | |||
52 | if (mem->aperture != APERTURE_SYSMEM || g->mm.force_pramin) | 53 | if (mem->aperture != APERTURE_SYSMEM || g->mm.force_pramin) |
53 | return 0; | 54 | return 0; |
54 | 55 | ||
56 | /* | ||
57 | * A CPU mapping is implicitly made for all SYSMEM DMA allocations that | ||
58 | * don't have NVGPU_DMA_NO_KERNEL_MAPPING. Thus we don't need to make | ||
59 | * another CPU mapping. | ||
60 | */ | ||
61 | if (!(mem->priv.flags & NVGPU_DMA_NO_KERNEL_MAPPING)) | ||
62 | return 0; | ||
63 | |||
55 | if (WARN_ON(mem->cpu_va)) { | 64 | if (WARN_ON(mem->cpu_va)) { |
56 | nvgpu_warn(g, "nested"); | 65 | nvgpu_warn(g, "nested"); |
57 | return -EBUSY; | 66 | return -EBUSY; |
@@ -73,6 +82,13 @@ void nvgpu_mem_end(struct gk20a *g, struct nvgpu_mem *mem) | |||
73 | if (mem->aperture != APERTURE_SYSMEM || g->mm.force_pramin) | 82 | if (mem->aperture != APERTURE_SYSMEM || g->mm.force_pramin) |
74 | return; | 83 | return; |
75 | 84 | ||
85 | /* | ||
86 | * Similar to nvgpu_mem_begin() we don't need to unmap the CPU mapping | ||
87 | * already made by the DMA API. | ||
88 | */ | ||
89 | if (!(mem->priv.flags & NVGPU_DMA_NO_KERNEL_MAPPING)) | ||
90 | return; | ||
91 | |||
76 | vunmap(mem->cpu_va); | 92 | vunmap(mem->cpu_va); |
77 | mem->cpu_va = NULL; | 93 | mem->cpu_va = NULL; |
78 | } | 94 | } |
@@ -225,3 +241,57 @@ void nvgpu_memset(struct gk20a *g, struct nvgpu_mem *mem, u32 offset, | |||
225 | WARN_ON("Accessing unallocated nvgpu_mem"); | 241 | WARN_ON("Accessing unallocated nvgpu_mem"); |
226 | } | 242 | } |
227 | } | 243 | } |
244 | |||
245 | /* | ||
246 | * Be careful how you use this! You are responsible for correctly freeing this | ||
247 | * memory. | ||
248 | */ | ||
249 | int nvgpu_mem_create_from_mem(struct gk20a *g, | ||
250 | struct nvgpu_mem *dest, struct nvgpu_mem *src, | ||
251 | int start_page, int nr_pages) | ||
252 | { | ||
253 | int ret; | ||
254 | u64 start = start_page * PAGE_SIZE; | ||
255 | u64 size = nr_pages * PAGE_SIZE; | ||
256 | dma_addr_t new_iova; | ||
257 | |||
258 | if (src->aperture != APERTURE_SYSMEM) | ||
259 | return -EINVAL; | ||
260 | |||
261 | /* Some silly things a caller might do... */ | ||
262 | if (size > src->size) | ||
263 | return -EINVAL; | ||
264 | if ((start + size) > src->size) | ||
265 | return -EINVAL; | ||
266 | |||
267 | dest->mem_flags = src->mem_flags | NVGPU_MEM_FLAG_SHADOW_COPY; | ||
268 | dest->aperture = src->aperture; | ||
269 | dest->skip_wmb = src->skip_wmb; | ||
270 | dest->size = size; | ||
271 | |||
272 | /* | ||
273 | * Re-use the CPU mapping only if the mapping was made by the DMA API. | ||
274 | */ | ||
275 | if (!(src->priv.flags & NVGPU_DMA_NO_KERNEL_MAPPING)) | ||
276 | dest->cpu_va = src->cpu_va + (PAGE_SIZE * start_page); | ||
277 | |||
278 | dest->priv.pages = src->priv.pages + start_page; | ||
279 | dest->priv.flags = src->priv.flags; | ||
280 | |||
281 | new_iova = sg_dma_address(src->priv.sgt->sgl) ? | ||
282 | sg_dma_address(src->priv.sgt->sgl) + start : 0; | ||
283 | |||
284 | /* | ||
285 | * Make a new SG table that is based only on the subset of pages that | ||
286 | * is passed to us. This table gets freed by the dma free routines. | ||
287 | */ | ||
288 | if (src->priv.flags & NVGPU_DMA_NO_KERNEL_MAPPING) | ||
289 | ret = gk20a_get_sgtable_from_pages(g->dev, &dest->priv.sgt, | ||
290 | src->priv.pages + start_page, | ||
291 | new_iova, size); | ||
292 | else | ||
293 | ret = gk20a_get_sgtable(g->dev, &dest->priv.sgt, dest->cpu_va, | ||
294 | new_iova, size); | ||
295 | |||
296 | return ret; | ||
297 | } | ||