diff options
author | Alex Waterman <alexw@nvidia.com> | 2017-05-25 19:56:50 -0400 |
---|---|---|
committer | mobile promotions <svcmobile_promotions@nvidia.com> | 2017-09-22 15:52:48 -0400 |
commit | 0090ee5aca268a3c359f34c74b8c521df3bd8593 (patch) | |
tree | 2779dc64554cdb38b717ce09c0e3dcbf36107ed3 /drivers/gpu/nvgpu/include | |
parent | e32cc0108cf2ef5de7a17f0f6c0aa9af7faf23ed (diff) |
gpu: nvgpu: nvgpu SGL implementation
The last major item preventing the core MM code in the nvgpu
driver from being platform agnostic is the usage of Linux
scattergather tables and scattergather lists. These data
structures are used throughout the mapping code to handle
discontiguous DMA allocations and also overloaded to represent
VIDMEM allocs.
The notion of a scatter gather table is crucial to a HW device
that can handle discontiguous DMA. The GPU has a MMU which
allows the GPU to do page gathering and present a virtually
contiguous buffer to the GPU HW. As a result it makes sense
for the GPU driver to use some sort of scatter gather concept
to maximize memory usage efficiency.
To that end this patch keeps the notion of a scatter gather
list but implements it in the nvgpu common code. It is based
heavily on the Linux SGL concept. It is a singly linked list
of blocks - each representing a chunk of memory. To map or
use a DMA allocation SW must iterate over each block in the
SGL.
This patch implements the most basic level of support for this
data structure. There are certainly easy optimizations that
could be done to speed up the current implementation. However,
this patch's goal is to simply divest the core MM code from
any last Linux'isms. Speed and efficiency come next.
Change-Id: Icf44641db22d87fa1d003debbd9f71b605258e42
Signed-off-by: Alex Waterman <alexw@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1530867
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Diffstat (limited to 'drivers/gpu/nvgpu/include')
-rw-r--r-- | drivers/gpu/nvgpu/include/nvgpu/gmmu.h | 2 | ||||
-rw-r--r-- | drivers/gpu/nvgpu/include/nvgpu/linux/nvgpu_mem.h | 2 | ||||
-rw-r--r-- | drivers/gpu/nvgpu/include/nvgpu/log.h | 1 | ||||
-rw-r--r-- | drivers/gpu/nvgpu/include/nvgpu/nvgpu_mem.h | 45 | ||||
-rw-r--r-- | drivers/gpu/nvgpu/include/nvgpu/page_allocator.h | 22 |
5 files changed, 54 insertions, 18 deletions
diff --git a/drivers/gpu/nvgpu/include/nvgpu/gmmu.h b/drivers/gpu/nvgpu/include/nvgpu/gmmu.h index de129a5f..11060300 100644 --- a/drivers/gpu/nvgpu/include/nvgpu/gmmu.h +++ b/drivers/gpu/nvgpu/include/nvgpu/gmmu.h | |||
@@ -27,8 +27,6 @@ | |||
27 | #include <nvgpu/gmmu_t19x.h> | 27 | #include <nvgpu/gmmu_t19x.h> |
28 | #endif | 28 | #endif |
29 | 29 | ||
30 | struct scatterlist; | ||
31 | |||
32 | /* | 30 | /* |
33 | * This is the GMMU API visible to blocks outside of the GMMU. Basically this | 31 | * This is the GMMU API visible to blocks outside of the GMMU. Basically this |
34 | * API supports all the different types of mappings that might be done in the | 32 | * API supports all the different types of mappings that might be done in the |
diff --git a/drivers/gpu/nvgpu/include/nvgpu/linux/nvgpu_mem.h b/drivers/gpu/nvgpu/include/nvgpu/linux/nvgpu_mem.h index e2d4d336..f96c2801 100644 --- a/drivers/gpu/nvgpu/include/nvgpu/linux/nvgpu_mem.h +++ b/drivers/gpu/nvgpu/include/nvgpu/linux/nvgpu_mem.h | |||
@@ -32,6 +32,8 @@ struct nvgpu_mem_priv { | |||
32 | }; | 32 | }; |
33 | 33 | ||
34 | u64 nvgpu_mem_get_addr_sgl(struct gk20a *g, struct scatterlist *sgl); | 34 | u64 nvgpu_mem_get_addr_sgl(struct gk20a *g, struct scatterlist *sgl); |
35 | struct nvgpu_mem_sgl *nvgpu_mem_sgl_create(struct gk20a *g, | ||
36 | struct sg_table *sgt); | ||
35 | 37 | ||
36 | /** | 38 | /** |
37 | * __nvgpu_mem_create_from_pages - Create an nvgpu_mem from physical pages. | 39 | * __nvgpu_mem_create_from_pages - Create an nvgpu_mem from physical pages. |
diff --git a/drivers/gpu/nvgpu/include/nvgpu/log.h b/drivers/gpu/nvgpu/include/nvgpu/log.h index 4cac3e70..cfce8c5b 100644 --- a/drivers/gpu/nvgpu/include/nvgpu/log.h +++ b/drivers/gpu/nvgpu/include/nvgpu/log.h | |||
@@ -71,6 +71,7 @@ enum nvgpu_log_categories { | |||
71 | gpu_dbg_pd_cache = BIT(20), /* PD cache traces. */ | 71 | gpu_dbg_pd_cache = BIT(20), /* PD cache traces. */ |
72 | gpu_dbg_alloc = BIT(21), /* Allocator debugging. */ | 72 | gpu_dbg_alloc = BIT(21), /* Allocator debugging. */ |
73 | gpu_dbg_dma = BIT(22), /* DMA allocation prints. */ | 73 | gpu_dbg_dma = BIT(22), /* DMA allocation prints. */ |
74 | gpu_dbg_sgl = BIT(23), /* SGL related traces. */ | ||
74 | gpu_dbg_mem = BIT(31), /* memory accesses; very verbose. */ | 75 | gpu_dbg_mem = BIT(31), /* memory accesses; very verbose. */ |
75 | }; | 76 | }; |
76 | 77 | ||
diff --git a/drivers/gpu/nvgpu/include/nvgpu/nvgpu_mem.h b/drivers/gpu/nvgpu/include/nvgpu/nvgpu_mem.h index a112623e..7d19cf81 100644 --- a/drivers/gpu/nvgpu/include/nvgpu/nvgpu_mem.h +++ b/drivers/gpu/nvgpu/include/nvgpu/nvgpu_mem.h | |||
@@ -33,6 +33,8 @@ struct gk20a; | |||
33 | struct nvgpu_allocator; | 33 | struct nvgpu_allocator; |
34 | struct nvgpu_gmmu_attrs; | 34 | struct nvgpu_gmmu_attrs; |
35 | 35 | ||
36 | #define NVGPU_MEM_DMA_ERROR (~0ULL) | ||
37 | |||
36 | /* | 38 | /* |
37 | * Real location of a buffer - nvgpu_aperture_mask() will deduce what will be | 39 | * Real location of a buffer - nvgpu_aperture_mask() will deduce what will be |
38 | * told to the gpu about the aperture, but this flag designates where the | 40 | * told to the gpu about the aperture, but this flag designates where the |
@@ -44,6 +46,28 @@ enum nvgpu_aperture { | |||
44 | APERTURE_VIDMEM | 46 | APERTURE_VIDMEM |
45 | }; | 47 | }; |
46 | 48 | ||
49 | /* | ||
50 | * This struct holds the necessary information for describing a struct | ||
51 | * nvgpu_mem's scatter gather list. | ||
52 | * | ||
53 | * These are created in a platform dependent way. As a result the function | ||
54 | * definition for allocating these lives in the <nvgpu/_OS_/nvgpu_mem.h> file. | ||
55 | */ | ||
56 | struct nvgpu_mem_sgl { | ||
57 | /* | ||
58 | * Internally this is implemented as a singly linked list. | ||
59 | */ | ||
60 | struct nvgpu_mem_sgl *next; | ||
61 | |||
62 | /* | ||
63 | * There is both a phys address and a DMA address since some systems, | ||
64 | * for example ones with an IOMMU, may see these as different addresses. | ||
65 | */ | ||
66 | u64 phys; | ||
67 | u64 dma; | ||
68 | u64 length; | ||
69 | }; | ||
70 | |||
47 | struct nvgpu_mem { | 71 | struct nvgpu_mem { |
48 | /* | 72 | /* |
49 | * Populated for all nvgpu_mem structs - vidmem or system. | 73 | * Populated for all nvgpu_mem structs - vidmem or system. |
@@ -176,6 +200,27 @@ int nvgpu_mem_create_from_mem(struct gk20a *g, | |||
176 | struct nvgpu_mem *dest, struct nvgpu_mem *src, | 200 | struct nvgpu_mem *dest, struct nvgpu_mem *src, |
177 | int start_page, int nr_pages); | 201 | int start_page, int nr_pages); |
178 | 202 | ||
203 | /** | ||
204 | * nvgpu_mem_sgl_create_from_mem - Create a scatter list from an nvgpu_mem. | ||
205 | * | ||
206 | * @g - The GPU. | ||
207 | * @mem - The source memory allocation to use. | ||
208 | * | ||
209 | * Create a scatter gather list from the passed @mem struct. This list lets the | ||
210 | * calling code iterate across each chunk of a DMA allocation for when that DMA | ||
211 | * allocation is not completely contiguous. | ||
212 | */ | ||
213 | struct nvgpu_mem_sgl *nvgpu_mem_sgl_create_from_mem(struct gk20a *g, | ||
214 | struct nvgpu_mem *mem); | ||
215 | void nvgpu_mem_sgl_free(struct gk20a *g, struct nvgpu_mem_sgl *sgl); | ||
216 | |||
217 | struct nvgpu_mem_sgl *nvgpu_mem_sgl_next(struct nvgpu_mem_sgl *sgl); | ||
218 | u64 nvgpu_mem_sgl_phys(struct nvgpu_mem_sgl *sgl); | ||
219 | u64 nvgpu_mem_sgl_dma(struct nvgpu_mem_sgl *sgl); | ||
220 | u64 nvgpu_mem_sgl_length(struct nvgpu_mem_sgl *sgl); | ||
221 | u64 nvgpu_mem_sgl_gpu_addr(struct gk20a *g, struct nvgpu_mem_sgl *sgl, | ||
222 | struct nvgpu_gmmu_attrs *attrs); | ||
223 | |||
179 | /* | 224 | /* |
180 | * Buffer accessors - wrap between begin() and end() if there is no permanent | 225 | * Buffer accessors - wrap between begin() and end() if there is no permanent |
181 | * kernel mapping for this buffer. | 226 | * kernel mapping for this buffer. |
diff --git a/drivers/gpu/nvgpu/include/nvgpu/page_allocator.h b/drivers/gpu/nvgpu/include/nvgpu/page_allocator.h index 9a5ef8d3..de83ca7f 100644 --- a/drivers/gpu/nvgpu/include/nvgpu/page_allocator.h +++ b/drivers/gpu/nvgpu/include/nvgpu/page_allocator.h | |||
@@ -18,6 +18,7 @@ | |||
18 | #define PAGE_ALLOCATOR_PRIV_H | 18 | #define PAGE_ALLOCATOR_PRIV_H |
19 | 19 | ||
20 | #include <nvgpu/allocator.h> | 20 | #include <nvgpu/allocator.h> |
21 | #include <nvgpu/nvgpu_mem.h> | ||
21 | #include <nvgpu/kmem.h> | 22 | #include <nvgpu/kmem.h> |
22 | #include <nvgpu/list.h> | 23 | #include <nvgpu/list.h> |
23 | #include <nvgpu/rbtree.h> | 24 | #include <nvgpu/rbtree.h> |
@@ -83,27 +84,17 @@ page_alloc_slab_page_from_list_entry(struct nvgpu_list_node *node) | |||
83 | ((uintptr_t)node - offsetof(struct page_alloc_slab_page, list_entry)); | 84 | ((uintptr_t)node - offsetof(struct page_alloc_slab_page, list_entry)); |
84 | }; | 85 | }; |
85 | 86 | ||
86 | struct page_alloc_chunk { | ||
87 | struct nvgpu_list_node list_entry; | ||
88 | |||
89 | u64 base; | ||
90 | u64 length; | ||
91 | }; | ||
92 | |||
93 | static inline struct page_alloc_chunk * | ||
94 | page_alloc_chunk_from_list_entry(struct nvgpu_list_node *node) | ||
95 | { | ||
96 | return (struct page_alloc_chunk *) | ||
97 | ((uintptr_t)node - offsetof(struct page_alloc_chunk, list_entry)); | ||
98 | }; | ||
99 | |||
100 | /* | 87 | /* |
101 | * Struct to handle internal management of page allocation. It holds a list | 88 | * Struct to handle internal management of page allocation. It holds a list |
102 | * of the chunks of pages that make up the overall allocation - much like a | 89 | * of the chunks of pages that make up the overall allocation - much like a |
103 | * scatter gather table. | 90 | * scatter gather table. |
104 | */ | 91 | */ |
105 | struct nvgpu_page_alloc { | 92 | struct nvgpu_page_alloc { |
106 | struct nvgpu_list_node alloc_chunks; | 93 | /* |
94 | * nvgpu_mem_sgl for describing the actual allocation. Convenient for | ||
95 | * GMMU mapping. | ||
96 | */ | ||
97 | struct nvgpu_mem_sgl *sgl; | ||
107 | 98 | ||
108 | int nr_chunks; | 99 | int nr_chunks; |
109 | u64 length; | 100 | u64 length; |
@@ -156,7 +147,6 @@ struct nvgpu_page_allocator { | |||
156 | int nr_slabs; | 147 | int nr_slabs; |
157 | 148 | ||
158 | struct nvgpu_kmem_cache *alloc_cache; | 149 | struct nvgpu_kmem_cache *alloc_cache; |
159 | struct nvgpu_kmem_cache *chunk_cache; | ||
160 | struct nvgpu_kmem_cache *slab_page_cache; | 150 | struct nvgpu_kmem_cache *slab_page_cache; |
161 | 151 | ||
162 | u64 flags; | 152 | u64 flags; |