author    Alex Waterman <alexw@nvidia.com>    2017-05-25 19:56:50 -0400
committer mobile promotions <svcmobile_promotions@nvidia.com>    2017-09-22 15:52:48 -0400
commit    0090ee5aca268a3c359f34c74b8c521df3bd8593 (patch)
tree      2779dc64554cdb38b717ce09c0e3dcbf36107ed3 /drivers/gpu/nvgpu/gk20a
parent    e32cc0108cf2ef5de7a17f0f6c0aa9af7faf23ed (diff)
gpu: nvgpu: nvgpu SGL implementation
The last major item preventing the core MM code in the nvgpu driver from being platform agnostic is the usage of Linux scatter-gather tables and scatter-gather lists. These data structures are used throughout the mapping code to handle discontiguous DMA allocations, and are also overloaded to represent VIDMEM allocs.

The notion of a scatter-gather table is crucial for a HW device that can handle discontiguous DMA. The GPU has an MMU which allows the GPU to do page gathering and present a virtually contiguous buffer to the GPU HW. As a result it makes sense for the GPU driver to use some sort of scatter-gather concept to maximize memory usage efficiency.

To that end this patch keeps the notion of a scatter-gather list but implements it in the nvgpu common code. It is based heavily on the Linux SGL concept: a singly linked list of blocks, each representing a chunk of memory. To map or use a DMA allocation, SW must iterate over each block in the SGL.

This patch implements the most basic level of support for this data structure. There are certainly easy optimizations that could be done to speed up the current implementation; however, this patch's goal is simply to divest the core MM code of its last Linux'isms. Speed and efficiency come next.

Change-Id: Icf44641db22d87fa1d003debbd9f71b605258e42
Signed-off-by: Alex Waterman <alexw@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1530867
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
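[Editor's note] For orientation before the diff: a minimal sketch of the structure the message describes, a NULL-terminated singly linked list with one node per physically contiguous chunk. The field names below are illustrative assumptions, not the actual nvgpu definition; only the accessor names (nvgpu_mem_sgl_phys(), nvgpu_mem_sgl_length(), nvgpu_mem_sgl_next()) are taken from the hunks that follow.

    /*
     * Sketch only -- field names are assumptions. Each node covers one
     * physically contiguous chunk of a discontiguous DMA allocation.
     */
    struct nvgpu_mem_sgl {
            struct nvgpu_mem_sgl *next;   /* next chunk; NULL terminates */
            u64 phys;                     /* physical base of this chunk */
            u64 length;                   /* chunk length in bytes       */
    };

    /* Accessors as used in the hunks below (signatures inferred from use): */
    u64 nvgpu_mem_sgl_phys(struct nvgpu_mem_sgl *sgl);
    u64 nvgpu_mem_sgl_length(struct nvgpu_mem_sgl *sgl);
    struct nvgpu_mem_sgl *nvgpu_mem_sgl_next(struct nvgpu_mem_sgl *sgl);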
Diffstat (limited to 'drivers/gpu/nvgpu/gk20a')
-rw-r--r--  drivers/gpu/nvgpu/gk20a/gk20a.h          9
-rw-r--r--  drivers/gpu/nvgpu/gk20a/mm_gk20a.c      20
-rw-r--r--  drivers/gpu/nvgpu/gk20a/mm_gk20a.h      43
-rw-r--r--  drivers/gpu/nvgpu/gk20a/pramin_gk20a.c  13
-rw-r--r--  drivers/gpu/nvgpu/gk20a/pramin_gk20a.h   6
5 files changed, 43 insertions(+), 48 deletions(-)
diff --git a/drivers/gpu/nvgpu/gk20a/gk20a.h b/drivers/gpu/nvgpu/gk20a/gk20a.h
index 7eee2d51..355228db 100644
--- a/drivers/gpu/nvgpu/gk20a/gk20a.h
+++ b/drivers/gpu/nvgpu/gk20a/gk20a.h
@@ -34,6 +34,7 @@ struct gk20a_debug_output;
 struct nvgpu_clk_pll_debug_data;
 struct nvgpu_nvhost_dev;
 struct nvgpu_cpu_time_correlation_sample;
+struct nvgpu_mem_sgl;
 
 #include <nvgpu/lock.h>
 #include <nvgpu/thread.h>
@@ -70,8 +71,6 @@ struct nvgpu_cpu_time_correlation_sample;
 #endif
 #include "ecc_gk20a.h"
 
-struct page_alloc_chunk;
-
 /* PTIMER_REF_FREQ_HZ corresponds to a period of 32 nanoseconds.
    32 ns is the resolution of ptimer. */
 #define PTIMER_REF_FREQ_HZ 31250000
@@ -701,7 +700,7 @@ struct gpu_ops {
         bool (*support_sparse)(struct gk20a *g);
         u64 (*gmmu_map)(struct vm_gk20a *vm,
                         u64 map_offset,
-                        struct sg_table *sgt,
+                        struct nvgpu_mem_sgl *sgl,
                         u64 buffer_offset,
                         u64 size,
                         int pgsz_idx,
@@ -761,9 +760,9 @@ struct gpu_ops {
                         size_t size);
         struct {
                 u32 (*enter)(struct gk20a *g, struct nvgpu_mem *mem,
-                             struct page_alloc_chunk *chunk, u32 w);
+                             struct nvgpu_mem_sgl *sgl, u32 w);
                 void (*exit)(struct gk20a *g, struct nvgpu_mem *mem,
-                             struct page_alloc_chunk *chunk);
+                             struct nvgpu_mem_sgl *sgl);
                 u32 (*data032_r)(u32 i);
         } pramin;
         struct {
diff --git a/drivers/gpu/nvgpu/gk20a/mm_gk20a.c b/drivers/gpu/nvgpu/gk20a/mm_gk20a.c
index 97b7aa80..cd34e769 100644
--- a/drivers/gpu/nvgpu/gk20a/mm_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/mm_gk20a.c
@@ -1151,7 +1151,7 @@ static int gk20a_gmmu_clear_vidmem_mem(struct gk20a *g, struct nvgpu_mem *mem)
         struct gk20a_fence *gk20a_fence_out = NULL;
         struct gk20a_fence *gk20a_last_fence = NULL;
         struct nvgpu_page_alloc *alloc = NULL;
-        struct page_alloc_chunk *chunk = NULL;
+        struct nvgpu_mem_sgl *sgl = NULL;
         int err = 0;
 
         if (g->mm.vidmem.ce_ctx_id == (u32)~0)
@@ -1159,16 +1159,16 @@ static int gk20a_gmmu_clear_vidmem_mem(struct gk20a *g, struct nvgpu_mem *mem)
 
         alloc = get_vidmem_page_alloc(mem->priv.sgt->sgl);
 
-        nvgpu_list_for_each_entry(chunk, &alloc->alloc_chunks,
-                        page_alloc_chunk, list_entry) {
+        sgl = alloc->sgl;
+        while (sgl) {
                 if (gk20a_last_fence)
                         gk20a_fence_put(gk20a_last_fence);
 
                 err = gk20a_ce_execute_ops(g,
                         g->mm.vidmem.ce_ctx_id,
                         0,
-                        chunk->base,
-                        chunk->length,
+                        nvgpu_mem_sgl_phys(sgl),
+                        nvgpu_mem_sgl_length(sgl),
                         0x00000000,
                         NVGPU_CE_DST_LOCATION_LOCAL_FB,
                         NVGPU_CE_MEMSET,
@@ -1183,6 +1183,7 @@ static int gk20a_gmmu_clear_vidmem_mem(struct gk20a *g, struct nvgpu_mem *mem)
                 }
 
                 gk20a_last_fence = gk20a_fence_out;
+                sgl = nvgpu_mem_sgl_next(sgl);
         }
 
         if (gk20a_last_fence) {
@@ -1262,10 +1263,10 @@ dma_addr_t gk20a_mm_gpuva_to_iova_base(struct vm_gk20a *vm, u64 gpu_vaddr)
         return addr;
 }
 
-u64 gk20a_mm_smmu_vaddr_translate(struct gk20a *g, dma_addr_t iova)
+u64 gk20a_mm_smmu_vaddr_translate(struct gk20a *g, u64 iova)
 {
         /* ensure it is not vidmem allocation */
-        WARN_ON(is_vidmem_page_alloc((u64)iova));
+        WARN_ON(is_vidmem_page_alloc(iova));
 
         if (device_is_iommuable(dev_from_gk20a(g)) &&
             g->ops.mm.get_physical_addr_bits)
@@ -2167,11 +2168,6 @@ u32 gk20a_mm_get_physical_addr_bits(struct gk20a *g)
         return 34;
 }
 
-u64 gk20a_mm_gpu_phys_addr(struct gk20a *g, u64 phys, u32 flags)
-{
-        return phys;
-}
-
 const struct gk20a_mmu_level *gk20a_mm_get_mmu_levels(struct gk20a *g,
                                                       u32 big_page_size)
 {
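[Editor's note] The clear-vidmem hunks above swap the Linux list-macro iteration for a plain pointer walk. A minimal sketch of the resulting idiom, assuming only the accessors visible in this diff (the loop body is a placeholder):

    struct nvgpu_mem_sgl *sgl;

    /* Visit every chunk of a discontiguous allocation, front to back. */
    for (sgl = alloc->sgl; sgl != NULL; sgl = nvgpu_mem_sgl_next(sgl)) {
            u64 phys = nvgpu_mem_sgl_phys(sgl);    /* chunk base address */
            u64 len  = nvgpu_mem_sgl_length(sgl);  /* chunk size in bytes */

            /* operate on the physically contiguous range [phys, phys + len) */
    }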
diff --git a/drivers/gpu/nvgpu/gk20a/mm_gk20a.h b/drivers/gpu/nvgpu/gk20a/mm_gk20a.h
index c77bebf8..2fdc1729 100644
--- a/drivers/gpu/nvgpu/gk20a/mm_gk20a.h
+++ b/drivers/gpu/nvgpu/gk20a/mm_gk20a.h
@@ -336,7 +336,6 @@ void gk20a_mm_dump_vm(struct vm_gk20a *vm,
 
 int gk20a_mm_suspend(struct gk20a *g);
 
-u64 gk20a_mm_gpu_phys_addr(struct gk20a *g, u64 phys, u32 flags);
 u64 gk20a_mm_smmu_vaddr_translate(struct gk20a *g, dma_addr_t iova);
 
 void gk20a_mm_ltc_isr(struct gk20a *g);
@@ -361,29 +360,29 @@ static inline phys_addr_t gk20a_mem_phys(struct nvgpu_mem *mem)
 }
 
 u64 gk20a_locked_gmmu_map(struct vm_gk20a *vm,
                           u64 map_offset,
-                          struct sg_table *sgt,
+                          struct nvgpu_mem_sgl *sgl,
                           u64 buffer_offset,
                           u64 size,
                           int pgsz_idx,
                           u8 kind_v,
                           u32 ctag_offset,
                           u32 flags,
                           int rw_flag,
                           bool clear_ctags,
                           bool sparse,
                           bool priv,
                           struct vm_gk20a_mapping_batch *batch,
                           enum nvgpu_aperture aperture);
 
 void gk20a_locked_gmmu_unmap(struct vm_gk20a *vm,
                              u64 vaddr,
                              u64 size,
                              int pgsz_idx,
                              bool va_allocated,
                              int rw_flag,
                              bool sparse,
                              struct vm_gk20a_mapping_batch *batch);
 
 struct sg_table *gk20a_mm_pin(struct device *dev, struct dma_buf *dmabuf);
 void gk20a_mm_unpin(struct device *dev, struct dma_buf *dmabuf,
diff --git a/drivers/gpu/nvgpu/gk20a/pramin_gk20a.c b/drivers/gpu/nvgpu/gk20a/pramin_gk20a.c
index 9d19e9e5..8a34a63c 100644
--- a/drivers/gpu/nvgpu/gk20a/pramin_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/pramin_gk20a.c
@@ -26,9 +26,9 @@
 
 /* WARNING: returns pramin_window_lock taken, complement with pramin_exit() */
 u32 gk20a_pramin_enter(struct gk20a *g, struct nvgpu_mem *mem,
-                       struct page_alloc_chunk *chunk, u32 w)
+                       struct nvgpu_mem_sgl *sgl, u32 w)
 {
-        u64 bufbase = chunk->base;
+        u64 bufbase = nvgpu_mem_sgl_phys(sgl);
         u64 addr = bufbase + w * sizeof(u32);
         u32 hi = (u32)((addr & ~(u64)0xfffff)
                 >> bus_bar0_window_target_bar0_window_base_shift_v());
@@ -40,8 +40,9 @@ u32 gk20a_pramin_enter(struct gk20a *g, struct nvgpu_mem *mem,
 
         gk20a_dbg(gpu_dbg_mem,
                   "0x%08x:%08x begin for %p,%p at [%llx,%llx] (sz %llx)",
-                  hi, lo, mem, chunk, bufbase,
-                  bufbase + chunk->length, chunk->length);
+                  hi, lo, mem, sgl, bufbase,
+                  bufbase + nvgpu_mem_sgl_phys(sgl),
+                  nvgpu_mem_sgl_length(sgl));
 
         WARN_ON(!bufbase);
 
@@ -57,9 +58,9 @@ u32 gk20a_pramin_enter(struct gk20a *g, struct nvgpu_mem *mem,
 }
 
 void gk20a_pramin_exit(struct gk20a *g, struct nvgpu_mem *mem,
-                       struct page_alloc_chunk *chunk)
+                       struct nvgpu_mem_sgl *sgl)
 {
-        gk20a_dbg(gpu_dbg_mem, "end for %p,%p", mem, chunk);
+        gk20a_dbg(gpu_dbg_mem, "end for %p,%p", mem, sgl);
 
         nvgpu_spinlock_release(&g->mm.pramin_window_lock);
 }
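[Editor's note] For context, a hedged sketch of how a caller might drive the reworked enter/exit pair across a whole allocation. The wrapper below is hypothetical, not code from this commit; only the two signatures and the locking contract (enter returns with pramin_window_lock taken, exit releases it) come from this file:

    /* Hypothetical helper -- illustrates per-chunk PRAMIN window usage. */
    static void pramin_walk_chunks(struct gk20a *g, struct nvgpu_mem *mem,
                                   struct nvgpu_mem_sgl *sgl)
    {
            while (sgl != NULL) {
                    /* Program the BAR0 window; returns with the lock held. */
                    u32 start = gk20a_pramin_enter(g, mem, sgl, 0);

                    /* ... access words of this chunk via the window ... */
                    (void)start;

                    /* Release pramin_window_lock taken by enter(). */
                    gk20a_pramin_exit(g, mem, sgl);
                    sgl = nvgpu_mem_sgl_next(sgl);
            }
    }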
diff --git a/drivers/gpu/nvgpu/gk20a/pramin_gk20a.h b/drivers/gpu/nvgpu/gk20a/pramin_gk20a.h
index 1a1ac871..fc5ba919 100644
--- a/drivers/gpu/nvgpu/gk20a/pramin_gk20a.h
+++ b/drivers/gpu/nvgpu/gk20a/pramin_gk20a.h
@@ -19,10 +19,10 @@
 
 struct gk20a;
 struct nvgpu_mem;
-struct page_alloc_chunk;
+struct nvgpu_mem_sgl;
 
 u32 gk20a_pramin_enter(struct gk20a *g, struct nvgpu_mem *mem,
-                       struct page_alloc_chunk *chunk, u32 w);
+                       struct nvgpu_mem_sgl *sgl, u32 w);
 void gk20a_pramin_exit(struct gk20a *g, struct nvgpu_mem *mem,
-                       struct page_alloc_chunk *chunk);
+                       struct nvgpu_mem_sgl *sgl);
 #endif