author	Alex Waterman <alexw@nvidia.com>	2017-03-21 18:34:50 -0400
committer	mobile promotions <svcmobile_promotions@nvidia.com>	2017-04-06 21:15:04 -0400
commit	50667e097b2be567e3d2f95e23b046243bca2bf6 (patch)
tree	e8fc42261868c6d69844f2e92fce33f6169434d4 /drivers/gpu/nvgpu/common
parent	8f2d4a3f4a0acc81bae6725d30506e92651a42b5 (diff)
gpu: nvgpu: Rename nvgpu DMA APIs
Rename the nvgpu DMA APIs from gk20a_gmmu_alloc* to nvgpu_dma_alloc*. This
better reflects the purpose of the APIs (to allocate DMA suitable memory)
and avoids confusion with GMMU related code.

JIRA NVGPU-12

Change-Id: I673d607db56dd6e44f02008dc7b5293209ef67bf
Signed-off-by: Alex Waterman <alexw@nvidia.com>
Reviewed-on: http://git-master/r/1325548
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
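As an illustration of the rename, a minimal sketch of a caller before and
after this change (alloc_example() is a hypothetical helper written for this
note; the nvgpu_dma_* signatures are as introduced in the diff below):

    /* Hypothetical caller: only the nvgpu function names change. */
    static int alloc_example(struct gk20a *g)
    {
            struct nvgpu_mem mem;
            int err;

            /* Previously: gk20a_gmmu_alloc_sys(g, PAGE_SIZE, &mem); */
            err = nvgpu_dma_alloc_sys(g, PAGE_SIZE, &mem);
            if (err)
                    return err;

            /* ... use the sysmem buffer ... */

            /* Previously: gk20a_gmmu_free(g, &mem); */
            nvgpu_dma_free(g, &mem);
            return 0;
    }

Allocation semantics are unchanged; only the prefix moves from gk20a_gmmu_
to nvgpu_dma_.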
Diffstat (limited to 'drivers/gpu/nvgpu/common')
-rw-r--r--	drivers/gpu/nvgpu/common/linux/dma.c	| 87
-rw-r--r--	drivers/gpu/nvgpu/common/semaphore.c	|  2
2 files changed, 45 insertions(+), 44 deletions(-)
diff --git a/drivers/gpu/nvgpu/common/linux/dma.c b/drivers/gpu/nvgpu/common/linux/dma.c
index 755848ea..92182a0d 100644
--- a/drivers/gpu/nvgpu/common/linux/dma.c
+++ b/drivers/gpu/nvgpu/common/linux/dma.c
@@ -23,7 +23,7 @@
 #include "gk20a/gk20a.h"
 
 #if defined(CONFIG_GK20A_VIDMEM)
-static u64 __gk20a_gmmu_alloc(struct nvgpu_allocator *allocator, dma_addr_t at,
+static u64 __nvgpu_dma_alloc(struct nvgpu_allocator *allocator, dma_addr_t at,
                              size_t size)
 {
        u64 addr = 0;
@@ -38,11 +38,11 @@ static u64 __gk20a_gmmu_alloc(struct nvgpu_allocator *allocator, dma_addr_t at,
 #endif
 
 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 9, 0)
-static void gk20a_dma_flags_to_attrs(unsigned long *attrs,
+static void nvgpu_dma_flags_to_attrs(unsigned long *attrs,
                                     unsigned long flags)
 #define ATTR_ARG(x) *x
 #else
-static void gk20a_dma_flags_to_attrs(struct dma_attrs *attrs,
+static void nvgpu_dma_flags_to_attrs(struct dma_attrs *attrs,
                                     unsigned long flags)
 #define ATTR_ARG(x) x
 #endif
@@ -56,12 +56,12 @@ static void gk20a_dma_flags_to_attrs(struct dma_attrs *attrs,
 #undef ATTR_ARG
 }
 
-int gk20a_gmmu_alloc(struct gk20a *g, size_t size, struct nvgpu_mem *mem)
+int nvgpu_dma_alloc(struct gk20a *g, size_t size, struct nvgpu_mem *mem)
 {
-       return gk20a_gmmu_alloc_flags(g, 0, size, mem);
+       return nvgpu_dma_alloc_flags(g, 0, size, mem);
 }
 
-int gk20a_gmmu_alloc_flags(struct gk20a *g, unsigned long flags, size_t size,
+int nvgpu_dma_alloc_flags(struct gk20a *g, unsigned long flags, size_t size,
                struct nvgpu_mem *mem)
 {
        if (g->mm.vidmem_is_vidmem) {
@@ -71,7 +71,7 @@ int gk20a_gmmu_alloc_flags(struct gk20a *g, unsigned long flags, size_t size,
                 * using gk20a_gmmu_alloc_map and it's vidmem, or if there's a
                 * difference, the user should use the flag explicitly anyway.
                 */
-               int err = gk20a_gmmu_alloc_flags_vid(g,
+               int err = nvgpu_dma_alloc_flags_vid(g,
                                flags | NVGPU_DMA_NO_KERNEL_MAPPING,
                                size, mem);
 
@@ -83,15 +83,15 @@ int gk20a_gmmu_alloc_flags(struct gk20a *g, unsigned long flags, size_t size,
                 */
        }
 
-       return gk20a_gmmu_alloc_flags_sys(g, flags, size, mem);
+       return nvgpu_dma_alloc_flags_sys(g, flags, size, mem);
 }
 
-int gk20a_gmmu_alloc_sys(struct gk20a *g, size_t size, struct nvgpu_mem *mem)
+int nvgpu_dma_alloc_sys(struct gk20a *g, size_t size, struct nvgpu_mem *mem)
 {
-       return gk20a_gmmu_alloc_flags_sys(g, 0, size, mem);
+       return nvgpu_dma_alloc_flags_sys(g, 0, size, mem);
 }
 
-int gk20a_gmmu_alloc_flags_sys(struct gk20a *g, unsigned long flags,
+int nvgpu_dma_alloc_flags_sys(struct gk20a *g, unsigned long flags,
                size_t size, struct nvgpu_mem *mem)
 {
        struct device *d = dev_from_gk20a(g);
@@ -103,7 +103,7 @@ int gk20a_gmmu_alloc_flags_sys(struct gk20a *g, unsigned long flags,
        if (flags) {
                DEFINE_DMA_ATTRS(dma_attrs);
 
-               gk20a_dma_flags_to_attrs(&dma_attrs, flags);
+               nvgpu_dma_flags_to_attrs(&dma_attrs, flags);
 
                if (flags & NVGPU_DMA_NO_KERNEL_MAPPING) {
                        mem->pages = dma_alloc_attrs(d,
@@ -149,19 +149,19 @@ fail_free:
        return err;
 }
 
-int gk20a_gmmu_alloc_vid(struct gk20a *g, size_t size, struct nvgpu_mem *mem)
+int nvgpu_dma_alloc_vid(struct gk20a *g, size_t size, struct nvgpu_mem *mem)
 {
-       return gk20a_gmmu_alloc_flags_vid(g,
+       return nvgpu_dma_alloc_flags_vid(g,
                        NVGPU_DMA_NO_KERNEL_MAPPING, size, mem);
 }
 
-int gk20a_gmmu_alloc_flags_vid(struct gk20a *g, unsigned long flags,
+int nvgpu_dma_alloc_flags_vid(struct gk20a *g, unsigned long flags,
                size_t size, struct nvgpu_mem *mem)
 {
-       return gk20a_gmmu_alloc_flags_vid_at(g, flags, size, mem, 0);
+       return nvgpu_dma_alloc_flags_vid_at(g, flags, size, mem, 0);
 }
 
-int gk20a_gmmu_alloc_flags_vid_at(struct gk20a *g, unsigned long flags,
+int nvgpu_dma_alloc_flags_vid_at(struct gk20a *g, unsigned long flags,
                size_t size, struct nvgpu_mem *mem, dma_addr_t at)
 {
 #if defined(CONFIG_GK20A_VIDMEM)
@@ -185,7 +185,7 @@ int gk20a_gmmu_alloc_flags_vid_at(struct gk20a *g, unsigned long flags,
 
        nvgpu_mutex_acquire(&g->mm.vidmem.clear_list_mutex);
        before_pending = atomic64_read(&g->mm.vidmem.bytes_pending);
-       addr = __gk20a_gmmu_alloc(vidmem_alloc, at, size);
+       addr = __nvgpu_dma_alloc(vidmem_alloc, at, size);
        nvgpu_mutex_release(&g->mm.vidmem.clear_list_mutex);
        if (!addr) {
                /*
@@ -237,23 +237,23 @@ fail_physfree:
 #endif
 }
 
-int gk20a_gmmu_alloc_map(struct vm_gk20a *vm, size_t size,
+int nvgpu_dma_alloc_map(struct vm_gk20a *vm, size_t size,
                struct nvgpu_mem *mem)
 {
-       return gk20a_gmmu_alloc_map_flags(vm, 0, size, mem);
+       return nvgpu_dma_alloc_map_flags(vm, 0, size, mem);
 }
 
-int gk20a_gmmu_alloc_map_flags(struct vm_gk20a *vm, unsigned long flags,
+int nvgpu_dma_alloc_map_flags(struct vm_gk20a *vm, unsigned long flags,
                size_t size, struct nvgpu_mem *mem)
 {
        if (vm->mm->vidmem_is_vidmem) {
                /*
                 * Force the no-kernel-mapping flag on because we don't support
                 * the lack of it for vidmem - the user should not care when
-                * using gk20a_gmmu_alloc_map and it's vidmem, or if there's a
+                * using nvgpu_dma_alloc_map and it's vidmem, or if there's a
                 * difference, the user should use the flag explicitly anyway.
                 */
-               int err = gk20a_gmmu_alloc_map_flags_vid(vm,
+               int err = nvgpu_dma_alloc_map_flags_vid(vm,
                                flags | NVGPU_DMA_NO_KERNEL_MAPPING,
                                size, mem);
 
@@ -265,19 +265,19 @@ int gk20a_gmmu_alloc_map_flags(struct vm_gk20a *vm, unsigned long flags,
                 */
        }
 
-       return gk20a_gmmu_alloc_map_flags_sys(vm, flags, size, mem);
+       return nvgpu_dma_alloc_map_flags_sys(vm, flags, size, mem);
 }
 
-int gk20a_gmmu_alloc_map_sys(struct vm_gk20a *vm, size_t size,
+int nvgpu_dma_alloc_map_sys(struct vm_gk20a *vm, size_t size,
                struct nvgpu_mem *mem)
 {
-       return gk20a_gmmu_alloc_map_flags_sys(vm, 0, size, mem);
+       return nvgpu_dma_alloc_map_flags_sys(vm, 0, size, mem);
 }
 
-int gk20a_gmmu_alloc_map_flags_sys(struct vm_gk20a *vm, unsigned long flags,
+int nvgpu_dma_alloc_map_flags_sys(struct vm_gk20a *vm, unsigned long flags,
                size_t size, struct nvgpu_mem *mem)
 {
-       int err = gk20a_gmmu_alloc_flags_sys(vm->mm->g, flags, size, mem);
+       int err = nvgpu_dma_alloc_flags_sys(vm->mm->g, flags, size, mem);
 
        if (err)
                return err;
@@ -293,21 +293,21 @@ int gk20a_gmmu_alloc_map_flags_sys(struct vm_gk20a *vm, unsigned long flags,
        return 0;
 
 fail_free:
-       gk20a_gmmu_free(vm->mm->g, mem);
+       nvgpu_dma_free(vm->mm->g, mem);
        return err;
 }
 
-int gk20a_gmmu_alloc_map_vid(struct vm_gk20a *vm, size_t size,
+int nvgpu_dma_alloc_map_vid(struct vm_gk20a *vm, size_t size,
                struct nvgpu_mem *mem)
 {
-       return gk20a_gmmu_alloc_map_flags_vid(vm,
+       return nvgpu_dma_alloc_map_flags_vid(vm,
                        NVGPU_DMA_NO_KERNEL_MAPPING, size, mem);
 }
 
-int gk20a_gmmu_alloc_map_flags_vid(struct vm_gk20a *vm, unsigned long flags,
+int nvgpu_dma_alloc_map_flags_vid(struct vm_gk20a *vm, unsigned long flags,
                size_t size, struct nvgpu_mem *mem)
 {
-       int err = gk20a_gmmu_alloc_flags_vid(vm->mm->g, flags, size, mem);
+       int err = nvgpu_dma_alloc_flags_vid(vm->mm->g, flags, size, mem);
 
        if (err)
                return err;
@@ -323,11 +323,11 @@ int gk20a_gmmu_alloc_map_flags_vid(struct vm_gk20a *vm, unsigned long flags,
        return 0;
 
 fail_free:
-       gk20a_gmmu_free(vm->mm->g, mem);
+       nvgpu_dma_free(vm->mm->g, mem);
        return err;
 }
 
-static void gk20a_gmmu_free_sys(struct gk20a *g, struct nvgpu_mem *mem)
+static void nvgpu_dma_free_sys(struct gk20a *g, struct nvgpu_mem *mem)
 {
        struct device *d = dev_from_gk20a(g);
 
@@ -335,7 +335,7 @@ static void gk20a_gmmu_free_sys(struct gk20a *g, struct nvgpu_mem *mem)
        if (mem->flags) {
                DEFINE_DMA_ATTRS(dma_attrs);
 
-               gk20a_dma_flags_to_attrs(&dma_attrs, mem->flags);
+               nvgpu_dma_flags_to_attrs(&dma_attrs, mem->flags);
 
                if (mem->flags & NVGPU_DMA_NO_KERNEL_MAPPING) {
                        dma_free_attrs(d, mem->size, mem->pages,
@@ -361,7 +361,7 @@ static void gk20a_gmmu_free_sys(struct gk20a *g, struct nvgpu_mem *mem)
        mem->aperture = APERTURE_INVALID;
 }
 
-static void gk20a_gmmu_free_vid(struct gk20a *g, struct nvgpu_mem *mem)
+static void nvgpu_dma_free_vid(struct gk20a *g, struct nvgpu_mem *mem)
 {
 #if defined(CONFIG_GK20A_VIDMEM)
        bool was_empty;
@@ -393,23 +393,24 @@ static void gk20a_gmmu_free_vid(struct gk20a *g, struct nvgpu_mem *mem)
 #endif
 }
 
-void gk20a_gmmu_free(struct gk20a *g, struct nvgpu_mem *mem)
+void nvgpu_dma_free(struct gk20a *g, struct nvgpu_mem *mem)
 {
        switch (mem->aperture) {
        case APERTURE_SYSMEM:
-               return gk20a_gmmu_free_sys(g, mem);
+               return nvgpu_dma_free_sys(g, mem);
        case APERTURE_VIDMEM:
-               return gk20a_gmmu_free_vid(g, mem);
+               return nvgpu_dma_free_vid(g, mem);
        default:
                break; /* like free() on "null" memory */
        }
 }
 
-void gk20a_gmmu_unmap_free(struct vm_gk20a *vm, struct nvgpu_mem *mem)
+void nvgpu_dma_unmap_free(struct vm_gk20a *vm, struct nvgpu_mem *mem)
 {
        if (mem->gpu_va)
-               gk20a_gmmu_unmap(vm, mem->gpu_va, mem->size, gk20a_mem_flag_none);
+               gk20a_gmmu_unmap(vm, mem->gpu_va, mem->size,
+                                gk20a_mem_flag_none);
        mem->gpu_va = 0;
 
-       gk20a_gmmu_free(vm->mm->g, mem);
+       nvgpu_dma_free(vm->mm->g, mem);
 }
diff --git a/drivers/gpu/nvgpu/common/semaphore.c b/drivers/gpu/nvgpu/common/semaphore.c
index cfe1149f..7c9bf9da 100644
--- a/drivers/gpu/nvgpu/common/semaphore.c
+++ b/drivers/gpu/nvgpu/common/semaphore.c
@@ -53,7 +53,7 @@ static int __nvgpu_semaphore_sea_grow(struct nvgpu_semaphore_sea *sea)
 
        __lock_sema_sea(sea);
 
-       ret = gk20a_gmmu_alloc_flags_sys(gk20a, NVGPU_DMA_NO_KERNEL_MAPPING,
+       ret = nvgpu_dma_alloc_flags_sys(gk20a, NVGPU_DMA_NO_KERNEL_MAPPING,
                                         PAGE_SIZE * SEMAPHORE_POOL_COUNT,
                                         &sea->sea_mem);
        if (ret)