summaryrefslogtreecommitdiffstats
path: root/drivers/gpu
diff options
context:
space:
mode:
authorAlex Waterman <alexw@nvidia.com>2017-04-10 16:27:47 -0400
committermobile promotions <svcmobile_promotions@nvidia.com>2017-04-25 17:25:54 -0400
commit6a14d980cfdce5609c0eb7b20e2da3d98fbbccb8 (patch)
treeca04bbb2a27f28958bd7f884fef0327063b9152e /drivers/gpu
parent39524b094180ab747287bc893e217dcbe5029e64 (diff)
gpu: nvgpu: Add sub-nvgpu_mem
Add an API for creating a special sub-nvgpu_mem struct. This struct comes with some fairly important caveats but is very useful for the semaphore code. Also, make sure that in nvgpu_mem_begin() and nvgpu_mem_end() no additional mappings are made if not necessary. More importantly during nvgpu_mem_end() it would be possible to vunmap() a CPU mapping of a DMA allocation that does not expect this to happen. JIRA NVGPU-12 JIRA NVGPU-30 Change-Id: I579429da9ff7288488753a113bafc558e0f17a0f Signed-off-by: Alex Waterman <alexw@nvidia.com> Reviewed-on: http://git-master/r/1464077 Reviewed-by: Automatic_Commit_Validation_User GVS: Gerrit_Virtual_Submit Reviewed-by: Terje Bergstrom <tbergstrom@nvidia.com> Reviewed-by: svccoveritychecker <svccoveritychecker@nvidia.com>
Diffstat (limited to 'drivers/gpu')
-rw-r--r--drivers/gpu/nvgpu/common/linux/dma.c3
-rw-r--r--drivers/gpu/nvgpu/common/linux/nvgpu_mem.c70
-rw-r--r--drivers/gpu/nvgpu/include/nvgpu/nvgpu_mem.h43
3 files changed, 115 insertions, 1 deletion
diff --git a/drivers/gpu/nvgpu/common/linux/dma.c b/drivers/gpu/nvgpu/common/linux/dma.c
index 832d0f47..7453fdef 100644
--- a/drivers/gpu/nvgpu/common/linux/dma.c
+++ b/drivers/gpu/nvgpu/common/linux/dma.c
@@ -334,7 +334,8 @@ static void nvgpu_dma_free_sys(struct gk20a *g, struct nvgpu_mem *mem)
334{ 334{
335 struct device *d = dev_from_gk20a(g); 335 struct device *d = dev_from_gk20a(g);
336 336
337 if (mem->cpu_va || mem->priv.pages) { 337 if (!(mem->mem_flags & NVGPU_MEM_FLAG_SHADOW_COPY) &&
338 (mem->cpu_va || mem->priv.pages)) {
338 if (mem->priv.flags) { 339 if (mem->priv.flags) {
339 DEFINE_DMA_ATTRS(dma_attrs); 340 DEFINE_DMA_ATTRS(dma_attrs);
340 341
diff --git a/drivers/gpu/nvgpu/common/linux/nvgpu_mem.c b/drivers/gpu/nvgpu/common/linux/nvgpu_mem.c
index bb19dd61..fb7ee7fe 100644
--- a/drivers/gpu/nvgpu/common/linux/nvgpu_mem.c
+++ b/drivers/gpu/nvgpu/common/linux/nvgpu_mem.c
@@ -14,6 +14,7 @@
14 * along with this program. If not, see <http://www.gnu.org/licenses/>. 14 * along with this program. If not, see <http://www.gnu.org/licenses/>.
15 */ 15 */
16 16
17#include <nvgpu/dma.h>
17#include <nvgpu/nvgpu_mem.h> 18#include <nvgpu/nvgpu_mem.h>
18#include <nvgpu/page_allocator.h> 19#include <nvgpu/page_allocator.h>
19#include <nvgpu/log.h> 20#include <nvgpu/log.h>
@@ -52,6 +53,14 @@ int nvgpu_mem_begin(struct gk20a *g, struct nvgpu_mem *mem)
52 if (mem->aperture != APERTURE_SYSMEM || g->mm.force_pramin) 53 if (mem->aperture != APERTURE_SYSMEM || g->mm.force_pramin)
53 return 0; 54 return 0;
54 55
56 /*
57 * A CPU mapping is implicitly made for all SYSMEM DMA allocations that
58 * don't have NVGPU_DMA_NO_KERNEL_MAPPING. Thus we don't need to make
59 * another CPU mapping.
60 */
61 if (!(mem->priv.flags & NVGPU_DMA_NO_KERNEL_MAPPING))
62 return 0;
63
55 if (WARN_ON(mem->cpu_va)) { 64 if (WARN_ON(mem->cpu_va)) {
56 nvgpu_warn(g, "nested"); 65 nvgpu_warn(g, "nested");
57 return -EBUSY; 66 return -EBUSY;
@@ -73,6 +82,13 @@ void nvgpu_mem_end(struct gk20a *g, struct nvgpu_mem *mem)
73 if (mem->aperture != APERTURE_SYSMEM || g->mm.force_pramin) 82 if (mem->aperture != APERTURE_SYSMEM || g->mm.force_pramin)
74 return; 83 return;
75 84
85 /*
86 * Similar to nvgpu_mem_begin() we don't need to unmap the CPU mapping
87 * already made by the DMA API.
88 */
89 if (!(mem->priv.flags & NVGPU_DMA_NO_KERNEL_MAPPING))
90 return;
91
76 vunmap(mem->cpu_va); 92 vunmap(mem->cpu_va);
77 mem->cpu_va = NULL; 93 mem->cpu_va = NULL;
78} 94}
@@ -225,3 +241,57 @@ void nvgpu_memset(struct gk20a *g, struct nvgpu_mem *mem, u32 offset,
225 WARN_ON("Accessing unallocated nvgpu_mem"); 241 WARN_ON("Accessing unallocated nvgpu_mem");
226 } 242 }
227} 243}
244
245/*
246 * Be careful how you use this! You are responsible for correctly freeing this
247 * memory.
248 */
249int nvgpu_mem_create_from_mem(struct gk20a *g,
250 struct nvgpu_mem *dest, struct nvgpu_mem *src,
251 int start_page, int nr_pages)
252{
253 int ret;
254 u64 start = start_page * PAGE_SIZE;
255 u64 size = nr_pages * PAGE_SIZE;
256 dma_addr_t new_iova;
257
258 if (src->aperture != APERTURE_SYSMEM)
259 return -EINVAL;
260
261 /* Some silly things a caller might do... */
262 if (size > src->size)
263 return -EINVAL;
264 if ((start + size) > src->size)
265 return -EINVAL;
266
267 dest->mem_flags = src->mem_flags | NVGPU_MEM_FLAG_SHADOW_COPY;
268 dest->aperture = src->aperture;
269 dest->skip_wmb = src->skip_wmb;
270 dest->size = size;
271
272 /*
273 * Re-use the CPU mapping only if the mapping was made by the DMA API.
274 */
275 if (!(src->priv.flags & NVGPU_DMA_NO_KERNEL_MAPPING))
276 dest->cpu_va = src->cpu_va + (PAGE_SIZE * start_page);
277
278 dest->priv.pages = src->priv.pages + start_page;
279 dest->priv.flags = src->priv.flags;
280
281 new_iova = sg_dma_address(src->priv.sgt->sgl) ?
282 sg_dma_address(src->priv.sgt->sgl) + start : 0;
283
284 /*
285 * Make a new SG table that is based only on the subset of pages that
286 * is passed to us. This table gets freed by the dma free routines.
287 */
288 if (src->priv.flags & NVGPU_DMA_NO_KERNEL_MAPPING)
289 ret = gk20a_get_sgtable_from_pages(g->dev, &dest->priv.sgt,
290 src->priv.pages + start_page,
291 new_iova, size);
292 else
293 ret = gk20a_get_sgtable(g->dev, &dest->priv.sgt, dest->cpu_va,
294 new_iova, size);
295
296 return ret;
297}
diff --git a/drivers/gpu/nvgpu/include/nvgpu/nvgpu_mem.h b/drivers/gpu/nvgpu/include/nvgpu/nvgpu_mem.h
index 1590ee7a..397e9ab1 100644
--- a/drivers/gpu/nvgpu/include/nvgpu/nvgpu_mem.h
+++ b/drivers/gpu/nvgpu/include/nvgpu/nvgpu_mem.h
@@ -49,6 +49,13 @@ struct nvgpu_mem {
49 bool skip_wmb; 49 bool skip_wmb;
50 50
51 /* 51 /*
52 * Set when a nvgpu_mem struct is not a "real" nvgpu_mem struct. Instead
53 * the struct is just a copy of another nvgpu_mem struct.
54 */
55#define NVGPU_MEM_FLAG_SHADOW_COPY (1 << 0)
56 unsigned long mem_flags;
57
58 /*
52 * Only populated for a sysmem allocation. 59 * Only populated for a sysmem allocation.
53 */ 60 */
54 void *cpu_va; 61 void *cpu_va;
@@ -86,6 +93,42 @@ static inline const char *nvgpu_aperture_str(enum nvgpu_aperture aperture)
86 return "UNKNOWN"; 93 return "UNKNOWN";
87} 94}
88 95
96/**
97 * nvgpu_mem_create_from_mem - Create a new nvgpu_mem struct from an old one.
98 *
99 * @g - The GPU.
100 * @dest - Destination nvgpu_mem to hold resulting memory description.
101 * @src - Source memory. Must be valid.
102 * @start_page - Starting page to use.
103 * @nr_pages - Number of pages to place in the new nvgpu_mem.
104 *
105 * Create a new nvgpu_mem struct describing a subsection of the @src nvgpu_mem.
 106 * This will create an nvgpu_mem object starting at @start_page and is @nr_pages
107 * long. This currently only works on SYSMEM nvgpu_mems. If this is called on a
108 * VIDMEM nvgpu_mem then this will return an error.
109 *
110 * There is a _major_ caveat to this API: if the source buffer is freed before
 111 * the copy is freed then the copy will become invalid. This is a result of
112 * how typical DMA APIs work: we can't call free on the buffer multiple times.
113 * Nor can we call free on parts of a buffer. Thus the only way to ensure that
114 * the entire buffer is actually freed is to call free once on the source
 115 * buffer. Since these nvgpu_mem structs are not ref-counted in any way it is up
116 * to the caller of this API to _ensure_ that the resulting nvgpu_mem buffer
117 * from this API is freed before the source buffer. Otherwise there can and will
118 * be memory corruption.
119 *
120 * The resulting nvgpu_mem should be released with the nvgpu_dma_free() or the
121 * nvgpu_dma_unmap_free() function depending on whether or not the resulting
122 * nvgpu_mem has been mapped.
123 *
124 * This will return 0 on success. An error is returned if the resulting
125 * nvgpu_mem would not make sense or if a new scatter gather table cannot be
126 * created.
127 */
128int nvgpu_mem_create_from_mem(struct gk20a *g,
129 struct nvgpu_mem *dest, struct nvgpu_mem *src,
130 int start_page, int nr_pages);
131
89/* 132/*
90 * Buffer accessors - wrap between begin() and end() if there is no permanent 133 * Buffer accessors - wrap between begin() and end() if there is no permanent
91 * kernel mapping for this buffer. 134 * kernel mapping for this buffer.