author | Alex Waterman <alexw@nvidia.com> | 2017-05-25 19:56:50 -0400
---|---|---
committer | mobile promotions <svcmobile_promotions@nvidia.com> | 2017-09-22 15:52:48 -0400
commit | 0090ee5aca268a3c359f34c74b8c521df3bd8593 (patch) |
tree | 2779dc64554cdb38b717ce09c0e3dcbf36107ed3 /drivers/gpu/nvgpu/gk20a/mm_gk20a.c |
parent | e32cc0108cf2ef5de7a17f0f6c0aa9af7faf23ed (diff) |
gpu: nvgpu: nvgpu SGL implementation
The last major item preventing the core MM code in the nvgpu
driver from being platform agnostic is the use of Linux
scatter-gather tables and scatter-gather lists. These data
structures are used throughout the mapping code to handle
discontiguous DMA allocations and are also overloaded to
represent VIDMEM allocations.
The notion of a scatter-gather table is crucial to any HW
device that can handle discontiguous DMA. The GPU has an MMU
which allows it to do page gathering and present a virtually
contiguous buffer to the GPU HW. As a result it makes sense
for the GPU driver to use some sort of scatter-gather concept
to maximize memory usage efficiency.
To that end this patch keeps the notion of a scatter-gather
list but implements it in the nvgpu common code. It is based
heavily on the Linux SGL concept: a singly linked list of
blocks, each representing a physically contiguous chunk of
memory. To map or use a DMA allocation, SW must iterate over
each block in the SGL.
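Concretely, the data structure reduces to a short list node
plus a few accessors. The sketch below is illustrative only:
the field layout is an assumption, while the accessor names
(nvgpu_mem_sgl_next(), nvgpu_mem_sgl_phys(),
nvgpu_mem_sgl_length()) are the ones used by the mapping code
in the diff below.

/*
 * Sketch of the nvgpu SGL; field layout assumed for
 * illustration. Each node describes one physically contiguous
 * chunk of a DMA allocation; next == NULL terminates the list.
 */
struct nvgpu_mem_sgl {
	struct nvgpu_mem_sgl *next;	/* Next chunk, or NULL. */
	u64 phys;			/* Physical base of this chunk. */
	u64 length;			/* Chunk length in bytes. */
};

static inline struct nvgpu_mem_sgl *nvgpu_mem_sgl_next(
		struct nvgpu_mem_sgl *sgl)
{
	return sgl->next;
}

static inline u64 nvgpu_mem_sgl_phys(struct nvgpu_mem_sgl *sgl)
{
	return sgl->phys;
}

static inline u64 nvgpu_mem_sgl_length(struct nvgpu_mem_sgl *sgl)
{
	return sgl->length;
}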
This patch implements the most basic level of support for this
data structure. There are certainly easy optimizations that
could be done to speed up the current implementation; however,
this patch's goal is simply to divest the core MM code of its
last Linuxisms. Speed and efficiency come next.
Change-Id: Icf44641db22d87fa1d003debbd9f71b605258e42
Signed-off-by: Alex Waterman <alexw@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1530867
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Diffstat (limited to 'drivers/gpu/nvgpu/gk20a/mm_gk20a.c')
-rw-r--r-- | drivers/gpu/nvgpu/gk20a/mm_gk20a.c | 20 |
1 file changed, 8 insertions(+), 12 deletions(-)
diff --git a/drivers/gpu/nvgpu/gk20a/mm_gk20a.c b/drivers/gpu/nvgpu/gk20a/mm_gk20a.c
index 97b7aa80..cd34e769 100644
--- a/drivers/gpu/nvgpu/gk20a/mm_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/mm_gk20a.c
@@ -1151,7 +1151,7 @@ static int gk20a_gmmu_clear_vidmem_mem(struct gk20a *g, struct nvgpu_mem *mem)
 	struct gk20a_fence *gk20a_fence_out = NULL;
 	struct gk20a_fence *gk20a_last_fence = NULL;
 	struct nvgpu_page_alloc *alloc = NULL;
-	struct page_alloc_chunk *chunk = NULL;
+	struct nvgpu_mem_sgl *sgl = NULL;
 	int err = 0;
 
 	if (g->mm.vidmem.ce_ctx_id == (u32)~0)
@@ -1159,16 +1159,16 @@ static int gk20a_gmmu_clear_vidmem_mem(struct gk20a *g, struct nvgpu_mem *mem)
 
 	alloc = get_vidmem_page_alloc(mem->priv.sgt->sgl);
 
-	nvgpu_list_for_each_entry(chunk, &alloc->alloc_chunks,
-			page_alloc_chunk, list_entry) {
+	sgl = alloc->sgl;
+	while (sgl) {
 		if (gk20a_last_fence)
 			gk20a_fence_put(gk20a_last_fence);
 
 		err = gk20a_ce_execute_ops(g,
 				g->mm.vidmem.ce_ctx_id,
 				0,
-				chunk->base,
-				chunk->length,
+				nvgpu_mem_sgl_phys(sgl),
+				nvgpu_mem_sgl_length(sgl),
 				0x00000000,
 				NVGPU_CE_DST_LOCATION_LOCAL_FB,
 				NVGPU_CE_MEMSET,
@@ -1183,6 +1183,7 @@ static int gk20a_gmmu_clear_vidmem_mem(struct gk20a *g, struct nvgpu_mem *mem)
 		}
 
 		gk20a_last_fence = gk20a_fence_out;
+		sgl = nvgpu_mem_sgl_next(sgl);
 	}
 
 	if (gk20a_last_fence) {
@@ -1262,10 +1263,10 @@ dma_addr_t gk20a_mm_gpuva_to_iova_base(struct vm_gk20a *vm, u64 gpu_vaddr)
 	return addr;
 }
 
-u64 gk20a_mm_smmu_vaddr_translate(struct gk20a *g, dma_addr_t iova)
+u64 gk20a_mm_smmu_vaddr_translate(struct gk20a *g, u64 iova)
 {
 	/* ensure it is not vidmem allocation */
-	WARN_ON(is_vidmem_page_alloc((u64)iova));
+	WARN_ON(is_vidmem_page_alloc(iova));
 
 	if (device_is_iommuable(dev_from_gk20a(g)) &&
 		g->ops.mm.get_physical_addr_bits)
@@ -2167,11 +2168,6 @@ u32 gk20a_mm_get_physical_addr_bits(struct gk20a *g)
 	return 34;
 }
 
-u64 gk20a_mm_gpu_phys_addr(struct gk20a *g, u64 phys, u32 flags)
-{
-	return phys;
-}
-
 const struct gk20a_mmu_level *gk20a_mm_get_mmu_levels(struct gk20a *g,
 		u32 big_page_size)
 {
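For reference, the traversal pattern the hunks above adopt is
a plain walk of the singly linked list. A minimal sketch,
assuming only that a VIDMEM nvgpu_page_alloc keeps the list
head in alloc->sgl (as the second hunk shows); the helper name
visit_alloc_chunks() is hypothetical:

/* Visit every physically contiguous chunk of an allocation. */
static void visit_alloc_chunks(struct nvgpu_page_alloc *alloc)
{
	struct nvgpu_mem_sgl *sgl;

	for (sgl = alloc->sgl; sgl != NULL;
	     sgl = nvgpu_mem_sgl_next(sgl)) {
		u64 base = nvgpu_mem_sgl_phys(sgl);	/* chunk base */
		u64 len = nvgpu_mem_sgl_length(sgl);	/* chunk size */

		/*
		 * Operate on [base, base + len), e.g. program a CE
		 * memset as gk20a_gmmu_clear_vidmem_mem() does. The
		 * casts below just silence unused-variable warnings
		 * in this sketch.
		 */
		(void)base;
		(void)len;
	}
}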