author     Alex Waterman <alexw@nvidia.com>    2017-06-07 20:32:56 -0400
committer  mobile promotions <svcmobile_promotions@nvidia.com>    2017-08-04 17:54:32 -0400
commit     1da69dd8b2c60a11e112844dd4e9636a913a99a0 (patch)
tree       56e6912518e205b1e999881cb02f7fa504878846 /drivers/gpu/nvgpu/common
parent     192cf8c1f8d1005ab08619c9152d514dec3a34ef (diff)
gpu: nvgpu: Remove mm.get_iova_addr
Remove the mm.get_iova_addr() HAL and replace it with a new HAL called
mm.gpu_phys_addr(). The new HAL returns the real physical address that
should be passed to the GPU for a physical address obtained from a
scatter list. It also provides a mechanism by which the HAL code can add
extra bits to a GPU physical address based on the attributes passed in;
this is necessary during GMMU page table programming.

Also remove the flags argument from the various address functions. This
flag was used to add an IO coherence bit to the GPU physical address,
which is not supported.

JIRA NVGPU-30

Change-Id: I69af5b1c6bd905c4077c26c098fac101c6b41a33
Signed-off-by: Alex Waterman <alexw@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1530864
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
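For illustration, a minimal sketch of what a chip-specific mm.gpu_phys_addr() hook could look like. Only the call shape (g, attrs, phys) is taken from this patch, assuming attrs is the struct nvgpu_gmmu_attrs pointer that the new call sites pass as NULL; the predicate example_needs_extra_bit() and the bit position are invented stand-ins for whatever per-chip logic would inspect the attributes.

/*
 * Hypothetical sketch only -- not part of this patch. Shows how a per-chip
 * mm.gpu_phys_addr() implementation could fold attribute-derived bits into
 * a physical address before it is written into a PDE/PTE.
 */
static bool example_needs_extra_bit(struct nvgpu_gmmu_attrs *attrs)
{
	/* Stand-in predicate; a real chip would inspect fields of attrs. */
	return false;
}

static u64 example_gpu_phys_addr(struct gk20a *g,
				 struct nvgpu_gmmu_attrs *attrs,
				 u64 phys)
{
	/* attrs may be NULL, e.g. when called from nvgpu_mem_get_addr_sgl(). */
	if (attrs && example_needs_extra_bit(attrs))
		phys |= 1ULL << 36;	/* invented bit position */

	return phys;
}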
Diffstat (limited to 'drivers/gpu/nvgpu/common')
 drivers/gpu/nvgpu/common/linux/nvgpu_mem.c (-rw-r--r--) | 58
 drivers/gpu/nvgpu/common/mm/gmmu.c (-rw-r--r--)         |  6
 2 files changed, 61 insertions(+), 3 deletions(-)
diff --git a/drivers/gpu/nvgpu/common/linux/nvgpu_mem.c b/drivers/gpu/nvgpu/common/linux/nvgpu_mem.c
index 34fd6626..e4991d0d 100644
--- a/drivers/gpu/nvgpu/common/linux/nvgpu_mem.c
+++ b/drivers/gpu/nvgpu/common/linux/nvgpu_mem.c
@@ -15,6 +15,7 @@
  */
 
 #include <nvgpu/dma.h>
+#include <nvgpu/gmmu.h>
 #include <nvgpu/nvgpu_mem.h>
 #include <nvgpu/page_allocator.h>
 #include <nvgpu/log.h>
@@ -23,6 +24,8 @@
 
 #include <nvgpu/linux/dma.h>
 
+#include "os_linux.h"
+
 #include "gk20a/gk20a.h"
 #include "gk20a/mm_gk20a.h"
 
@@ -247,6 +250,61 @@ void nvgpu_memset(struct gk20a *g, struct nvgpu_mem *mem, u32 offset,
 }
 
 /*
+ * Obtain a SYSMEM address from a Linux SGL. This should eventually go away
+ * and/or become private to this file once all bad usages of Linux SGLs are
+ * cleaned up in the driver.
+ */
+u64 nvgpu_mem_get_addr_sgl(struct gk20a *g, struct scatterlist *sgl)
+{
+	struct nvgpu_os_linux *l = container_of(g, struct nvgpu_os_linux, g);
+
+	if (!device_is_iommuable(l->dev))
+		return g->ops.mm.gpu_phys_addr(g, NULL, sg_phys(sgl));
+
+	if (sg_dma_address(sgl) == 0)
+		return g->ops.mm.gpu_phys_addr(g, NULL, sg_phys(sgl));
+
+	if (sg_dma_address(sgl) == DMA_ERROR_CODE)
+		return 0;
+
+	return gk20a_mm_smmu_vaddr_translate(g, sg_dma_address(sgl));
+}
+
+/*
+ * Obtain the address the GPU should use from the %mem assuming this is a SYSMEM
+ * allocation.
+ */
+static u64 nvgpu_mem_get_addr_sysmem(struct gk20a *g, struct nvgpu_mem *mem)
+{
+	return nvgpu_mem_get_addr_sgl(g, mem->priv.sgt->sgl);
+}
+
+/*
+ * Return the base address of %mem. Handles whether this is a VIDMEM or SYSMEM
+ * allocation.
+ *
+ * %attrs can be NULL. If it is not NULL then it may be inspected to determine
+ * if the address needs to be modified before writing into a PTE.
+ */
+u64 nvgpu_mem_get_addr(struct gk20a *g, struct nvgpu_mem *mem)
+{
+	struct nvgpu_page_alloc *alloc;
+
+	if (mem->aperture == APERTURE_SYSMEM)
+		return nvgpu_mem_get_addr_sysmem(g, mem);
+
+	/*
+	 * Otherwise get the vidmem address.
+	 */
+	alloc = get_vidmem_page_alloc(mem->priv.sgt->sgl);
+
+	/* This API should not be used with > 1 chunks */
+	WARN_ON(alloc->nr_chunks != 1);
+
+	return alloc->base;
+}
+
+/*
  * Be careful how you use this! You are responsible for correctly freeing this
  * memory.
  */
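As a usage illustration (hypothetical caller, not part of this change), the new helper hides the SYSMEM/VIDMEM distinction from its callers. Only nvgpu_mem_get_addr() and nvgpu_err() are existing driver symbols here; the wrapper is invented for the sketch.

/*
 * Hypothetical caller sketch: resolve the GPU-visible base address of a
 * buffer regardless of aperture. nvgpu_mem_get_addr() returns 0 when the
 * underlying DMA mapping failed (DMA_ERROR_CODE), so 0 is treated as an
 * error here.
 */
static u64 example_resolve_gpu_addr(struct gk20a *g, struct nvgpu_mem *mem)
{
	u64 addr = nvgpu_mem_get_addr(g, mem);

	if (!addr)
		nvgpu_err(g, "no GPU address for nvgpu_mem buffer");

	return addr;
}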
diff --git a/drivers/gpu/nvgpu/common/mm/gmmu.c b/drivers/gpu/nvgpu/common/mm/gmmu.c
index 1be87c85..30be1b85 100644
--- a/drivers/gpu/nvgpu/common/mm/gmmu.c
+++ b/drivers/gpu/nvgpu/common/mm/gmmu.c
@@ -201,7 +201,7 @@ u64 nvgpu_pde_phys_addr(struct gk20a *g, struct nvgpu_gmmu_pd *pd)
 	if (g->mm.has_physical_mode)
 		page_addr = sg_phys(pd->mem->priv.sgt->sgl);
 	else
-		page_addr = nvgpu_mem_get_base_addr(g, pd->mem, 0);
+		page_addr = nvgpu_mem_get_addr(g, pd->mem);
 
 	return page_addr + pd->mem_offs;
 }
@@ -559,7 +559,7 @@ static int __nvgpu_gmmu_update_page_table_sysmem(struct vm_gk20a *vm,
 	sgl = sgt->sgl;
 
 	if (!g->mm.bypass_smmu) {
-		u64 io_addr = g->ops.mm.get_iova_addr(g, sgl, 0);
+		u64 io_addr = nvgpu_mem_get_addr_sgl(g, sgl);
 
 		io_addr += space_to_skip;
 
@@ -670,7 +670,7 @@ static int __nvgpu_gmmu_update_page_table(struct vm_gk20a *vm,
 
 			phys_addr = alloc->base;
 		} else
-			phys_addr = g->ops.mm.get_iova_addr(g, sgt->sgl, 0);
+			phys_addr = nvgpu_mem_get_addr_sgl(g, sgt->sgl);
 	}
 
 	__gmmu_dbg(g, attrs,
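With these call sites converted, the IOMMU/SMMU address handling stays in the common nvgpu_mem_get_addr_sgl() path, and the only per-chip piece left is the mm.gpu_phys_addr() fix-up, which is what allows the old get_iova_addr() HAL and its flags argument to be removed.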