author     Alex Waterman <alexw@nvidia.com>    2016-12-20 16:55:48 -0500
committer  mobile promotions <svcmobile_promotions@nvidia.com>    2017-01-09 15:33:16 -0500
commit     6df3992b60959d32c7113cb77e131a2547174f3a
tree       efbdc9e6ccd2330d5c469ca0783ecb0137da8fc4
parent     e229514bece5a109cdbfe263f6329efe987e5939
gpu: nvgpu: Move allocators to common/mm/
Move the GPU allocators to common/mm/ since the allocators are common code across all GPUs. Also rename the allocator code to move away from gk20a_ prefixed structs and functions.

This caused one issue with the nvgpu_alloc() and nvgpu_free() functions: those names were already taken by a pair of functions that allocate with either kmalloc() or vmalloc() depending on the size of the allocation. That pair has now been renamed to nvgpu_kalloc() and nvgpu_kfree().

Bug 1799159

Change-Id: Iddda92c013612bcb209847084ec85b8953002fa5
Signed-off-by: Alex Waterman <alexw@nvidia.com>
Reviewed-on: http://git-master/r/1274400
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
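For readers unfamiliar with the helper mentioned above, a size-dependent kmalloc()/vmalloc() wrapper of the kind now named nvgpu_kalloc()/nvgpu_kfree() typically looks like the minimal sketch below. The PAGE_SIZE threshold and the exact bodies are assumptions for illustration, not the driver's actual implementation:

#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

/*
 * Sketch of a size-dependent allocator pair like the one the commit
 * message describes: small requests come from the slab allocator
 * (kmalloc), large ones from vmalloc. The threshold is assumed.
 */
static inline void *nvgpu_kalloc(size_t size)
{
	if (size > PAGE_SIZE)
		return vmalloc(size);
	return kmalloc(size, GFP_KERNEL);
}

static inline void nvgpu_kfree(void *addr)
{
	if (is_vmalloc_addr(addr))
		vfree(addr);
	else
		kfree(addr);
}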
-rw-r--r--  drivers/gpu/nvgpu/Makefile.nvgpu | 10
-rw-r--r--  drivers/gpu/nvgpu/common/mm/bitmap_allocator.c (renamed from drivers/gpu/nvgpu/gk20a/gk20a_allocator_bitmap.c) | 105
-rw-r--r--  drivers/gpu/nvgpu/common/mm/bitmap_allocator_priv.h (renamed from drivers/gpu/nvgpu/gk20a/bitmap_allocator_priv.h) | 14
-rw-r--r--  drivers/gpu/nvgpu/common/mm/buddy_allocator.c (renamed from drivers/gpu/nvgpu/gk20a/gk20a_allocator_buddy.c) | 296
-rw-r--r--  drivers/gpu/nvgpu/common/mm/buddy_allocator_priv.h (renamed from drivers/gpu/nvgpu/gk20a/buddy_allocator_priv.h) | 60
-rw-r--r--  drivers/gpu/nvgpu/common/mm/lockless_allocator.c (renamed from drivers/gpu/nvgpu/gk20a/gk20a_allocator_lockless.c) | 65
-rw-r--r--  drivers/gpu/nvgpu/common/mm/lockless_allocator_priv.h (renamed from drivers/gpu/nvgpu/gk20a/lockless_allocator_priv.h) | 12
-rw-r--r--  drivers/gpu/nvgpu/common/mm/nvgpu_allocator.c (renamed from drivers/gpu/nvgpu/gk20a/gk20a_allocator.c) | 59
-rw-r--r--  drivers/gpu/nvgpu/common/mm/page_allocator.c (renamed from drivers/gpu/nvgpu/gk20a/gk20a_allocator_page.c) | 231
-rw-r--r--  drivers/gpu/nvgpu/gk20a/as_gk20a.c | 10
-rw-r--r--  drivers/gpu/nvgpu/gk20a/channel_gk20a.c | 12
-rw-r--r--  drivers/gpu/nvgpu/gk20a/channel_gk20a.h | 2
-rw-r--r--  drivers/gpu/nvgpu/gk20a/dbg_gpu_gk20a.c | 4
-rw-r--r--  drivers/gpu/nvgpu/gk20a/debug_gk20a.c | 2
-rw-r--r--  drivers/gpu/nvgpu/gk20a/fence_gk20a.c | 16
-rw-r--r--  drivers/gpu/nvgpu/gk20a/fence_gk20a.h | 2
-rw-r--r--  drivers/gpu/nvgpu/gk20a/gk20a.c | 3
-rw-r--r--  drivers/gpu/nvgpu/gk20a/gr_gk20a.c | 6
-rw-r--r--  drivers/gpu/nvgpu/gk20a/mm_gk20a.c | 114
-rw-r--r--  drivers/gpu/nvgpu/gk20a/mm_gk20a.h | 23
-rw-r--r--  drivers/gpu/nvgpu/gk20a/pmu_gk20a.c | 26
-rw-r--r--  drivers/gpu/nvgpu/gk20a/pmu_gk20a.h | 2
-rw-r--r--  drivers/gpu/nvgpu/gk20a/semaphore_gk20a.h | 3
-rw-r--r--  drivers/gpu/nvgpu/include/nvgpu/allocator.h (renamed from drivers/gpu/nvgpu/gk20a/gk20a_allocator.h) | 120
-rw-r--r--  drivers/gpu/nvgpu/include/nvgpu/page_allocator.h (renamed from drivers/gpu/nvgpu/gk20a/page_allocator_priv.h) | 22
-rw-r--r--  drivers/gpu/nvgpu/vgpu/mm_vgpu.c | 20
26 files changed, 624 insertions(+), 615 deletions(-)
diff --git a/drivers/gpu/nvgpu/Makefile.nvgpu b/drivers/gpu/nvgpu/Makefile.nvgpu
index 93629eff..afce062b 100644
--- a/drivers/gpu/nvgpu/Makefile.nvgpu
+++ b/drivers/gpu/nvgpu/Makefile.nvgpu
@@ -23,6 +23,11 @@ obj-$(CONFIG_GK20A) := nvgpu.o
 
 nvgpu-y := \
 	common/linux/timers.o \
+	common/mm/nvgpu_allocator.o \
+	common/mm/bitmap_allocator.o \
+	common/mm/buddy_allocator.o \
+	common/mm/page_allocator.o \
+	common/mm/lockless_allocator.o \
 	nvgpu_common.o \
 	gk20a/gk20a.o \
 	gk20a/sched_gk20a.o \
@@ -51,11 +56,6 @@ nvgpu-y := \
 	gk20a/fb_gk20a.o \
 	gk20a/hal.o \
 	gk20a/hal_gk20a.o \
-	gk20a/gk20a_allocator.o \
-	gk20a/gk20a_allocator_bitmap.o \
-	gk20a/gk20a_allocator_buddy.o \
-	gk20a/gk20a_allocator_page.o \
-	gk20a/gk20a_allocator_lockless.o \
 	gk20a/cde_gk20a.o \
 	gk20a/platform_gk20a_generic.o \
 	gk20a/tsg_gk20a.o \
diff --git a/drivers/gpu/nvgpu/gk20a/gk20a_allocator_bitmap.c b/drivers/gpu/nvgpu/common/mm/bitmap_allocator.c
index f98e0782..6f267c85 100644
--- a/drivers/gpu/nvgpu/gk20a/gk20a_allocator_bitmap.c
+++ b/drivers/gpu/nvgpu/common/mm/bitmap_allocator.c
@@ -18,46 +18,47 @@
18#include <linux/slab.h> 18#include <linux/slab.h>
19#include <linux/bitops.h> 19#include <linux/bitops.h>
20 20
21#include "gk20a_allocator.h" 21#include <nvgpu/allocator.h>
22
22#include "bitmap_allocator_priv.h" 23#include "bitmap_allocator_priv.h"
23 24
24static struct kmem_cache *meta_data_cache; /* slab cache for meta data. */ 25static struct kmem_cache *meta_data_cache; /* slab cache for meta data. */
25static DEFINE_MUTEX(meta_data_cache_lock); 26static DEFINE_MUTEX(meta_data_cache_lock);
26 27
27static u64 gk20a_bitmap_alloc_length(struct gk20a_allocator *a) 28static u64 nvgpu_bitmap_alloc_length(struct nvgpu_allocator *a)
28{ 29{
29 struct gk20a_bitmap_allocator *ba = a->priv; 30 struct nvgpu_bitmap_allocator *ba = a->priv;
30 31
31 return ba->length; 32 return ba->length;
32} 33}
33 34
34static u64 gk20a_bitmap_alloc_base(struct gk20a_allocator *a) 35static u64 nvgpu_bitmap_alloc_base(struct nvgpu_allocator *a)
35{ 36{
36 struct gk20a_bitmap_allocator *ba = a->priv; 37 struct nvgpu_bitmap_allocator *ba = a->priv;
37 38
38 return ba->base; 39 return ba->base;
39} 40}
40 41
41static int gk20a_bitmap_alloc_inited(struct gk20a_allocator *a) 42static int nvgpu_bitmap_alloc_inited(struct nvgpu_allocator *a)
42{ 43{
43 struct gk20a_bitmap_allocator *ba = a->priv; 44 struct nvgpu_bitmap_allocator *ba = a->priv;
44 int inited = ba->inited; 45 int inited = ba->inited;
45 46
46 rmb(); 47 rmb();
47 return inited; 48 return inited;
48} 49}
49 50
50static u64 gk20a_bitmap_alloc_end(struct gk20a_allocator *a) 51static u64 nvgpu_bitmap_alloc_end(struct nvgpu_allocator *a)
51{ 52{
52 struct gk20a_bitmap_allocator *ba = a->priv; 53 struct nvgpu_bitmap_allocator *ba = a->priv;
53 54
54 return ba->base + ba->length; 55 return ba->base + ba->length;
55} 56}
56 57
57static u64 gk20a_bitmap_alloc_fixed(struct gk20a_allocator *__a, 58static u64 nvgpu_bitmap_alloc_fixed(struct nvgpu_allocator *__a,
58 u64 base, u64 len) 59 u64 base, u64 len)
59{ 60{
60 struct gk20a_bitmap_allocator *a = bitmap_allocator(__a); 61 struct nvgpu_bitmap_allocator *a = bitmap_allocator(__a);
61 u64 blks, offs, ret; 62 u64 blks, offs, ret;
62 63
63 /* Compute the bit offset and make sure it's aligned to a block. */ 64 /* Compute the bit offset and make sure it's aligned to a block. */
@@ -101,10 +102,10 @@ fail:
101 * Note: this function won't do much error checking. Thus you could really 102 * Note: this function won't do much error checking. Thus you could really
102 * confuse the allocator if you misuse this function. 103 * confuse the allocator if you misuse this function.
103 */ 104 */
104static void gk20a_bitmap_free_fixed(struct gk20a_allocator *__a, 105static void nvgpu_bitmap_free_fixed(struct nvgpu_allocator *__a,
105 u64 base, u64 len) 106 u64 base, u64 len)
106{ 107{
107 struct gk20a_bitmap_allocator *a = bitmap_allocator(__a); 108 struct nvgpu_bitmap_allocator *a = bitmap_allocator(__a);
108 u64 blks, offs; 109 u64 blks, offs;
109 110
110 offs = base >> a->blk_shift; 111 offs = base >> a->blk_shift;
@@ -129,15 +130,15 @@ static void gk20a_bitmap_free_fixed(struct gk20a_allocator *__a,
129/* 130/*
130 * Add the passed alloc to the tree of stored allocations. 131 * Add the passed alloc to the tree of stored allocations.
131 */ 132 */
132static void insert_alloc_metadata(struct gk20a_bitmap_allocator *a, 133static void insert_alloc_metadata(struct nvgpu_bitmap_allocator *a,
133 struct gk20a_bitmap_alloc *alloc) 134 struct nvgpu_bitmap_alloc *alloc)
134{ 135{
135 struct rb_node **new = &a->allocs.rb_node; 136 struct rb_node **new = &a->allocs.rb_node;
136 struct rb_node *parent = NULL; 137 struct rb_node *parent = NULL;
137 struct gk20a_bitmap_alloc *tmp; 138 struct nvgpu_bitmap_alloc *tmp;
138 139
139 while (*new) { 140 while (*new) {
140 tmp = container_of(*new, struct gk20a_bitmap_alloc, 141 tmp = container_of(*new, struct nvgpu_bitmap_alloc,
141 alloc_entry); 142 alloc_entry);
142 143
143 parent = *new; 144 parent = *new;
@@ -158,14 +159,14 @@ static void insert_alloc_metadata(struct gk20a_bitmap_allocator *a,
158/* 159/*
159 * Find and remove meta-data from the outstanding allocations. 160 * Find and remove meta-data from the outstanding allocations.
160 */ 161 */
161static struct gk20a_bitmap_alloc *find_alloc_metadata( 162static struct nvgpu_bitmap_alloc *find_alloc_metadata(
162 struct gk20a_bitmap_allocator *a, u64 addr) 163 struct nvgpu_bitmap_allocator *a, u64 addr)
163{ 164{
164 struct rb_node *node = a->allocs.rb_node; 165 struct rb_node *node = a->allocs.rb_node;
165 struct gk20a_bitmap_alloc *alloc; 166 struct nvgpu_bitmap_alloc *alloc;
166 167
167 while (node) { 168 while (node) {
168 alloc = container_of(node, struct gk20a_bitmap_alloc, 169 alloc = container_of(node, struct nvgpu_bitmap_alloc,
169 alloc_entry); 170 alloc_entry);
170 171
171 if (addr < alloc->base) 172 if (addr < alloc->base)
@@ -187,10 +188,10 @@ static struct gk20a_bitmap_alloc *find_alloc_metadata(
187/* 188/*
188 * Tree of alloc meta data stores the address of the alloc not the bit offset. 189 * Tree of alloc meta data stores the address of the alloc not the bit offset.
189 */ 190 */
190static int __gk20a_bitmap_store_alloc(struct gk20a_bitmap_allocator *a, 191static int __nvgpu_bitmap_store_alloc(struct nvgpu_bitmap_allocator *a,
191 u64 addr, u64 len) 192 u64 addr, u64 len)
192{ 193{
193 struct gk20a_bitmap_alloc *alloc = 194 struct nvgpu_bitmap_alloc *alloc =
194 kmem_cache_alloc(meta_data_cache, GFP_KERNEL); 195 kmem_cache_alloc(meta_data_cache, GFP_KERNEL);
195 196
196 if (!alloc) 197 if (!alloc)
@@ -208,11 +209,11 @@ static int __gk20a_bitmap_store_alloc(struct gk20a_bitmap_allocator *a,
208 * @len is in bytes. This routine will figure out the right number of bits to 209 * @len is in bytes. This routine will figure out the right number of bits to
209 * actually allocate. The return is the address in bytes as well. 210 * actually allocate. The return is the address in bytes as well.
210 */ 211 */
211static u64 gk20a_bitmap_alloc(struct gk20a_allocator *__a, u64 len) 212static u64 nvgpu_bitmap_alloc(struct nvgpu_allocator *__a, u64 len)
212{ 213{
213 u64 blks, addr; 214 u64 blks, addr;
214 unsigned long offs, adjusted_offs, limit; 215 unsigned long offs, adjusted_offs, limit;
215 struct gk20a_bitmap_allocator *a = bitmap_allocator(__a); 216 struct nvgpu_bitmap_allocator *a = bitmap_allocator(__a);
216 217
217 blks = len >> a->blk_shift; 218 blks = len >> a->blk_shift;
218 219
@@ -255,7 +256,7 @@ static u64 gk20a_bitmap_alloc(struct gk20a_allocator *__a, u64 len)
255 * allocation. 256 * allocation.
256 */ 257 */
257 if (!(a->flags & GPU_ALLOC_NO_ALLOC_PAGE) && 258 if (!(a->flags & GPU_ALLOC_NO_ALLOC_PAGE) &&
258 __gk20a_bitmap_store_alloc(a, addr, blks * a->blk_size)) 259 __nvgpu_bitmap_store_alloc(a, addr, blks * a->blk_size))
259 goto fail_reset_bitmap; 260 goto fail_reset_bitmap;
260 261
261 alloc_dbg(__a, "Alloc 0x%-10llx 0x%-5llx [bits=0x%llx (%llu)]\n", 262 alloc_dbg(__a, "Alloc 0x%-10llx 0x%-5llx [bits=0x%llx (%llu)]\n",
@@ -276,10 +277,10 @@ fail:
276 return 0; 277 return 0;
277} 278}
278 279
279static void gk20a_bitmap_free(struct gk20a_allocator *__a, u64 addr) 280static void nvgpu_bitmap_free(struct nvgpu_allocator *__a, u64 addr)
280{ 281{
281 struct gk20a_bitmap_allocator *a = bitmap_allocator(__a); 282 struct nvgpu_bitmap_allocator *a = bitmap_allocator(__a);
282 struct gk20a_bitmap_alloc *alloc = NULL; 283 struct nvgpu_bitmap_alloc *alloc = NULL;
283 u64 offs, adjusted_offs, blks; 284 u64 offs, adjusted_offs, blks;
284 285
285 alloc_lock(__a); 286 alloc_lock(__a);
@@ -312,17 +313,17 @@ done:
312 alloc_unlock(__a); 313 alloc_unlock(__a);
313} 314}
314 315
315static void gk20a_bitmap_alloc_destroy(struct gk20a_allocator *__a) 316static void nvgpu_bitmap_alloc_destroy(struct nvgpu_allocator *__a)
316{ 317{
317 struct gk20a_bitmap_allocator *a = bitmap_allocator(__a); 318 struct nvgpu_bitmap_allocator *a = bitmap_allocator(__a);
318 struct gk20a_bitmap_alloc *alloc; 319 struct nvgpu_bitmap_alloc *alloc;
319 struct rb_node *node; 320 struct rb_node *node;
320 321
321 /* 322 /*
322 * Kill any outstanding allocations. 323 * Kill any outstanding allocations.
323 */ 324 */
324 while ((node = rb_first(&a->allocs)) != NULL) { 325 while ((node = rb_first(&a->allocs)) != NULL) {
325 alloc = container_of(node, struct gk20a_bitmap_alloc, 326 alloc = container_of(node, struct nvgpu_bitmap_alloc,
326 alloc_entry); 327 alloc_entry);
327 328
328 rb_erase(node, &a->allocs); 329 rb_erase(node, &a->allocs);
@@ -333,10 +334,10 @@ static void gk20a_bitmap_alloc_destroy(struct gk20a_allocator *__a)
333 kfree(a); 334 kfree(a);
334} 335}
335 336
336static void gk20a_bitmap_print_stats(struct gk20a_allocator *__a, 337static void nvgpu_bitmap_print_stats(struct nvgpu_allocator *__a,
337 struct seq_file *s, int lock) 338 struct seq_file *s, int lock)
338{ 339{
339 struct gk20a_bitmap_allocator *a = bitmap_allocator(__a); 340 struct nvgpu_bitmap_allocator *a = bitmap_allocator(__a);
340 341
341 __alloc_pstat(s, __a, "Bitmap allocator params:\n"); 342 __alloc_pstat(s, __a, "Bitmap allocator params:\n");
342 __alloc_pstat(s, __a, " start = 0x%llx\n", a->base); 343 __alloc_pstat(s, __a, " start = 0x%llx\n", a->base);
@@ -353,34 +354,34 @@ static void gk20a_bitmap_print_stats(struct gk20a_allocator *__a,
353 a->bytes_alloced - a->bytes_freed); 354 a->bytes_alloced - a->bytes_freed);
354} 355}
355 356
356static const struct gk20a_allocator_ops bitmap_ops = { 357static const struct nvgpu_allocator_ops bitmap_ops = {
357 .alloc = gk20a_bitmap_alloc, 358 .alloc = nvgpu_bitmap_alloc,
358 .free = gk20a_bitmap_free, 359 .free = nvgpu_bitmap_free,
359 360
360 .alloc_fixed = gk20a_bitmap_alloc_fixed, 361 .alloc_fixed = nvgpu_bitmap_alloc_fixed,
361 .free_fixed = gk20a_bitmap_free_fixed, 362 .free_fixed = nvgpu_bitmap_free_fixed,
362 363
363 .base = gk20a_bitmap_alloc_base, 364 .base = nvgpu_bitmap_alloc_base,
364 .length = gk20a_bitmap_alloc_length, 365 .length = nvgpu_bitmap_alloc_length,
365 .end = gk20a_bitmap_alloc_end, 366 .end = nvgpu_bitmap_alloc_end,
366 .inited = gk20a_bitmap_alloc_inited, 367 .inited = nvgpu_bitmap_alloc_inited,
367 368
368 .fini = gk20a_bitmap_alloc_destroy, 369 .fini = nvgpu_bitmap_alloc_destroy,
369 370
370 .print_stats = gk20a_bitmap_print_stats, 371 .print_stats = nvgpu_bitmap_print_stats,
371}; 372};
372 373
373 374
374int gk20a_bitmap_allocator_init(struct gk20a *g, struct gk20a_allocator *__a, 375int nvgpu_bitmap_allocator_init(struct gk20a *g, struct nvgpu_allocator *__a,
375 const char *name, u64 base, u64 length, 376 const char *name, u64 base, u64 length,
376 u64 blk_size, u64 flags) 377 u64 blk_size, u64 flags)
377{ 378{
378 int err; 379 int err;
379 struct gk20a_bitmap_allocator *a; 380 struct nvgpu_bitmap_allocator *a;
380 381
381 mutex_lock(&meta_data_cache_lock); 382 mutex_lock(&meta_data_cache_lock);
382 if (!meta_data_cache) 383 if (!meta_data_cache)
383 meta_data_cache = KMEM_CACHE(gk20a_bitmap_alloc, 0); 384 meta_data_cache = KMEM_CACHE(nvgpu_bitmap_alloc, 0);
384 mutex_unlock(&meta_data_cache_lock); 385 mutex_unlock(&meta_data_cache_lock);
385 386
386 if (!meta_data_cache) 387 if (!meta_data_cache)
@@ -402,11 +403,11 @@ int gk20a_bitmap_allocator_init(struct gk20a *g, struct gk20a_allocator *__a,
402 length -= blk_size; 403 length -= blk_size;
403 } 404 }
404 405
405 a = kzalloc(sizeof(struct gk20a_bitmap_allocator), GFP_KERNEL); 406 a = kzalloc(sizeof(struct nvgpu_bitmap_allocator), GFP_KERNEL);
406 if (!a) 407 if (!a)
407 return -ENOMEM; 408 return -ENOMEM;
408 409
409 err = __gk20a_alloc_common_init(__a, name, a, false, &bitmap_ops); 410 err = __nvgpu_alloc_common_init(__a, name, a, false, &bitmap_ops);
410 if (err) 411 if (err)
411 goto fail; 412 goto fail;
412 413
@@ -426,7 +427,7 @@ int gk20a_bitmap_allocator_init(struct gk20a *g, struct gk20a_allocator *__a,
426 wmb(); 427 wmb();
427 a->inited = true; 428 a->inited = true;
428 429
429 gk20a_init_alloc_debug(g, __a); 430 nvgpu_init_alloc_debug(g, __a);
430 alloc_dbg(__a, "New allocator: type bitmap\n"); 431 alloc_dbg(__a, "New allocator: type bitmap\n");
431 alloc_dbg(__a, " base 0x%llx\n", a->base); 432 alloc_dbg(__a, " base 0x%llx\n", a->base);
432 alloc_dbg(__a, " bit_offs 0x%llx\n", a->bit_offs); 433 alloc_dbg(__a, " bit_offs 0x%llx\n", a->bit_offs);
diff --git a/drivers/gpu/nvgpu/gk20a/bitmap_allocator_priv.h b/drivers/gpu/nvgpu/common/mm/bitmap_allocator_priv.h
index a686b704..9802b9db 100644
--- a/drivers/gpu/nvgpu/gk20a/bitmap_allocator_priv.h
+++ b/drivers/gpu/nvgpu/common/mm/bitmap_allocator_priv.h
@@ -19,10 +19,10 @@
 
 #include <linux/rbtree.h>
 
-struct gk20a_allocator;
+struct nvgpu_allocator;
 
-struct gk20a_bitmap_allocator {
-	struct gk20a_allocator *owner;
+struct nvgpu_bitmap_allocator {
+	struct nvgpu_allocator *owner;
 
 	u64 base; /* Base address of the space. */
 	u64 length; /* Length of the space. */
@@ -54,16 +54,16 @@ struct gk20a_bitmap_allocator {
 	u64 bytes_freed;
 };
 
-struct gk20a_bitmap_alloc {
+struct nvgpu_bitmap_alloc {
 	u64 base;
 	u64 length;
 	struct rb_node alloc_entry; /* RB tree of allocations. */
 };
 
-static inline struct gk20a_bitmap_allocator *bitmap_allocator(
-	struct gk20a_allocator *a)
+static inline struct nvgpu_bitmap_allocator *bitmap_allocator(
+	struct nvgpu_allocator *a)
 {
-	return (struct gk20a_bitmap_allocator *)(a)->priv;
+	return (struct nvgpu_bitmap_allocator *)(a)->priv;
 }
 
 
diff --git a/drivers/gpu/nvgpu/gk20a/gk20a_allocator_buddy.c b/drivers/gpu/nvgpu/common/mm/buddy_allocator.c
index 3715e9f8..39a53801 100644
--- a/drivers/gpu/nvgpu/gk20a/gk20a_allocator_buddy.c
+++ b/drivers/gpu/nvgpu/common/mm/buddy_allocator.c
@@ -17,20 +17,22 @@
17#include <linux/kernel.h> 17#include <linux/kernel.h>
18#include <linux/slab.h> 18#include <linux/slab.h>
19 19
20#include "mm_gk20a.h" 20#include <nvgpu/allocator.h>
21#include "platform_gk20a.h" 21
22#include "gk20a_allocator.h" 22#include "gk20a/mm_gk20a.h"
23#include "gk20a/platform_gk20a.h"
24
23#include "buddy_allocator_priv.h" 25#include "buddy_allocator_priv.h"
24 26
25static struct kmem_cache *buddy_cache; /* slab cache for meta data. */ 27static struct kmem_cache *buddy_cache; /* slab cache for meta data. */
26 28
27/* Some other buddy allocator functions. */ 29/* Some other buddy allocator functions. */
28static struct gk20a_buddy *balloc_free_buddy(struct gk20a_buddy_allocator *a, 30static struct nvgpu_buddy *balloc_free_buddy(struct nvgpu_buddy_allocator *a,
29 u64 addr); 31 u64 addr);
30static void balloc_coalesce(struct gk20a_buddy_allocator *a, 32static void balloc_coalesce(struct nvgpu_buddy_allocator *a,
31 struct gk20a_buddy *b); 33 struct nvgpu_buddy *b);
32static void __balloc_do_free_fixed(struct gk20a_buddy_allocator *a, 34static void __balloc_do_free_fixed(struct nvgpu_buddy_allocator *a,
33 struct gk20a_fixed_alloc *falloc); 35 struct nvgpu_fixed_alloc *falloc);
34 36
35/* 37/*
36 * This function is not present in older kernel's list.h code. 38 * This function is not present in older kernel's list.h code.
@@ -64,7 +66,7 @@ static void __balloc_do_free_fixed(struct gk20a_buddy_allocator *a,
64 * Hueristic: Just guessing that the best max order is the largest single 66 * Hueristic: Just guessing that the best max order is the largest single
65 * block that will fit in the address space. 67 * block that will fit in the address space.
66 */ 68 */
67static void balloc_compute_max_order(struct gk20a_buddy_allocator *a) 69static void balloc_compute_max_order(struct nvgpu_buddy_allocator *a)
68{ 70{
69 u64 true_max_order = ilog2(a->blks); 71 u64 true_max_order = ilog2(a->blks);
70 72
@@ -83,7 +85,7 @@ static void balloc_compute_max_order(struct gk20a_buddy_allocator *a)
83 * Since we can only allocate in chucks of a->blk_size we need to trim off 85 * Since we can only allocate in chucks of a->blk_size we need to trim off
84 * any excess data that is not aligned to a->blk_size. 86 * any excess data that is not aligned to a->blk_size.
85 */ 87 */
86static void balloc_allocator_align(struct gk20a_buddy_allocator *a) 88static void balloc_allocator_align(struct nvgpu_buddy_allocator *a)
87{ 89{
88 a->start = ALIGN(a->base, a->blk_size); 90 a->start = ALIGN(a->base, a->blk_size);
89 WARN_ON(a->start != a->base); 91 WARN_ON(a->start != a->base);
@@ -95,17 +97,17 @@ static void balloc_allocator_align(struct gk20a_buddy_allocator *a)
95/* 97/*
96 * Pass NULL for parent if you want a top level buddy. 98 * Pass NULL for parent if you want a top level buddy.
97 */ 99 */
98static struct gk20a_buddy *balloc_new_buddy(struct gk20a_buddy_allocator *a, 100static struct nvgpu_buddy *balloc_new_buddy(struct nvgpu_buddy_allocator *a,
99 struct gk20a_buddy *parent, 101 struct nvgpu_buddy *parent,
100 u64 start, u64 order) 102 u64 start, u64 order)
101{ 103{
102 struct gk20a_buddy *new_buddy; 104 struct nvgpu_buddy *new_buddy;
103 105
104 new_buddy = kmem_cache_alloc(buddy_cache, GFP_KERNEL); 106 new_buddy = kmem_cache_alloc(buddy_cache, GFP_KERNEL);
105 if (!new_buddy) 107 if (!new_buddy)
106 return NULL; 108 return NULL;
107 109
108 memset(new_buddy, 0, sizeof(struct gk20a_buddy)); 110 memset(new_buddy, 0, sizeof(struct nvgpu_buddy));
109 111
110 new_buddy->parent = parent; 112 new_buddy->parent = parent;
111 new_buddy->start = start; 113 new_buddy->start = start;
@@ -116,8 +118,8 @@ static struct gk20a_buddy *balloc_new_buddy(struct gk20a_buddy_allocator *a,
116 return new_buddy; 118 return new_buddy;
117} 119}
118 120
119static void __balloc_buddy_list_add(struct gk20a_buddy_allocator *a, 121static void __balloc_buddy_list_add(struct nvgpu_buddy_allocator *a,
120 struct gk20a_buddy *b, 122 struct nvgpu_buddy *b,
121 struct list_head *list) 123 struct list_head *list)
122{ 124{
123 if (buddy_is_in_list(b)) { 125 if (buddy_is_in_list(b)) {
@@ -141,8 +143,8 @@ static void __balloc_buddy_list_add(struct gk20a_buddy_allocator *a,
141 buddy_set_in_list(b); 143 buddy_set_in_list(b);
142} 144}
143 145
144static void __balloc_buddy_list_rem(struct gk20a_buddy_allocator *a, 146static void __balloc_buddy_list_rem(struct nvgpu_buddy_allocator *a,
145 struct gk20a_buddy *b) 147 struct nvgpu_buddy *b)
146{ 148{
147 if (!buddy_is_in_list(b)) { 149 if (!buddy_is_in_list(b)) {
148 alloc_dbg(balloc_owner(a), 150 alloc_dbg(balloc_owner(a),
@@ -159,21 +161,21 @@ static void __balloc_buddy_list_rem(struct gk20a_buddy_allocator *a,
159 * Add a buddy to one of the buddy lists and deal with the necessary 161 * Add a buddy to one of the buddy lists and deal with the necessary
160 * book keeping. Adds the buddy to the list specified by the buddy's order. 162 * book keeping. Adds the buddy to the list specified by the buddy's order.
161 */ 163 */
162static void balloc_blist_add(struct gk20a_buddy_allocator *a, 164static void balloc_blist_add(struct nvgpu_buddy_allocator *a,
163 struct gk20a_buddy *b) 165 struct nvgpu_buddy *b)
164{ 166{
165 __balloc_buddy_list_add(a, b, balloc_get_order_list(a, b->order)); 167 __balloc_buddy_list_add(a, b, balloc_get_order_list(a, b->order));
166 a->buddy_list_len[b->order]++; 168 a->buddy_list_len[b->order]++;
167} 169}
168 170
169static void balloc_blist_rem(struct gk20a_buddy_allocator *a, 171static void balloc_blist_rem(struct nvgpu_buddy_allocator *a,
170 struct gk20a_buddy *b) 172 struct nvgpu_buddy *b)
171{ 173{
172 __balloc_buddy_list_rem(a, b); 174 __balloc_buddy_list_rem(a, b);
173 a->buddy_list_len[b->order]--; 175 a->buddy_list_len[b->order]--;
174} 176}
175 177
176static u64 balloc_get_order(struct gk20a_buddy_allocator *a, u64 len) 178static u64 balloc_get_order(struct nvgpu_buddy_allocator *a, u64 len)
177{ 179{
178 if (len == 0) 180 if (len == 0)
179 return 0; 181 return 0;
@@ -184,7 +186,7 @@ static u64 balloc_get_order(struct gk20a_buddy_allocator *a, u64 len)
184 return fls(len); 186 return fls(len);
185} 187}
186 188
187static u64 __balloc_max_order_in(struct gk20a_buddy_allocator *a, 189static u64 __balloc_max_order_in(struct nvgpu_buddy_allocator *a,
188 u64 start, u64 end) 190 u64 start, u64 end)
189{ 191{
190 u64 size = (end - start) >> a->blk_shift; 192 u64 size = (end - start) >> a->blk_shift;
@@ -198,11 +200,11 @@ static u64 __balloc_max_order_in(struct gk20a_buddy_allocator *a,
198/* 200/*
199 * Initialize the buddy lists. 201 * Initialize the buddy lists.
200 */ 202 */
201static int balloc_init_lists(struct gk20a_buddy_allocator *a) 203static int balloc_init_lists(struct nvgpu_buddy_allocator *a)
202{ 204{
203 int i; 205 int i;
204 u64 bstart, bend, order; 206 u64 bstart, bend, order;
205 struct gk20a_buddy *buddy; 207 struct nvgpu_buddy *buddy;
206 208
207 bstart = a->start; 209 bstart = a->start;
208 bend = a->end; 210 bend = a->end;
@@ -228,7 +230,7 @@ cleanup:
228 for (i = 0; i < GPU_BALLOC_ORDER_LIST_LEN; i++) { 230 for (i = 0; i < GPU_BALLOC_ORDER_LIST_LEN; i++) {
229 if (!list_empty(balloc_get_order_list(a, i))) { 231 if (!list_empty(balloc_get_order_list(a, i))) {
230 buddy = list_first_entry(balloc_get_order_list(a, i), 232 buddy = list_first_entry(balloc_get_order_list(a, i),
231 struct gk20a_buddy, buddy_entry); 233 struct nvgpu_buddy, buddy_entry);
232 balloc_blist_rem(a, buddy); 234 balloc_blist_rem(a, buddy);
233 kmem_cache_free(buddy_cache, buddy); 235 kmem_cache_free(buddy_cache, buddy);
234 } 236 }
@@ -240,24 +242,24 @@ cleanup:
240/* 242/*
241 * Clean up and destroy the passed allocator. 243 * Clean up and destroy the passed allocator.
242 */ 244 */
243static void gk20a_buddy_allocator_destroy(struct gk20a_allocator *__a) 245static void nvgpu_buddy_allocator_destroy(struct nvgpu_allocator *__a)
244{ 246{
245 int i; 247 int i;
246 struct rb_node *node; 248 struct rb_node *node;
247 struct gk20a_buddy *bud; 249 struct nvgpu_buddy *bud;
248 struct gk20a_fixed_alloc *falloc; 250 struct nvgpu_fixed_alloc *falloc;
249 struct gk20a_buddy_allocator *a = __a->priv; 251 struct nvgpu_buddy_allocator *a = __a->priv;
250 252
251 alloc_lock(__a); 253 alloc_lock(__a);
252 254
253 gk20a_fini_alloc_debug(__a); 255 nvgpu_fini_alloc_debug(__a);
254 256
255 /* 257 /*
256 * Free the fixed allocs first. 258 * Free the fixed allocs first.
257 */ 259 */
258 while ((node = rb_first(&a->fixed_allocs)) != NULL) { 260 while ((node = rb_first(&a->fixed_allocs)) != NULL) {
259 falloc = container_of(node, 261 falloc = container_of(node,
260 struct gk20a_fixed_alloc, alloced_entry); 262 struct nvgpu_fixed_alloc, alloced_entry);
261 263
262 rb_erase(node, &a->fixed_allocs); 264 rb_erase(node, &a->fixed_allocs);
263 __balloc_do_free_fixed(a, falloc); 265 __balloc_do_free_fixed(a, falloc);
@@ -267,7 +269,7 @@ static void gk20a_buddy_allocator_destroy(struct gk20a_allocator *__a)
267 * And now free all outstanding allocations. 269 * And now free all outstanding allocations.
268 */ 270 */
269 while ((node = rb_first(&a->alloced_buddies)) != NULL) { 271 while ((node = rb_first(&a->alloced_buddies)) != NULL) {
270 bud = container_of(node, struct gk20a_buddy, alloced_entry); 272 bud = container_of(node, struct nvgpu_buddy, alloced_entry);
271 balloc_free_buddy(a, bud->start); 273 balloc_free_buddy(a, bud->start);
272 balloc_blist_add(a, bud); 274 balloc_blist_add(a, bud);
273 balloc_coalesce(a, bud); 275 balloc_coalesce(a, bud);
@@ -281,7 +283,7 @@ static void gk20a_buddy_allocator_destroy(struct gk20a_allocator *__a)
281 283
282 while (!list_empty(balloc_get_order_list(a, i))) { 284 while (!list_empty(balloc_get_order_list(a, i))) {
283 bud = list_first_entry(balloc_get_order_list(a, i), 285 bud = list_first_entry(balloc_get_order_list(a, i),
284 struct gk20a_buddy, buddy_entry); 286 struct nvgpu_buddy, buddy_entry);
285 balloc_blist_rem(a, bud); 287 balloc_blist_rem(a, bud);
286 kmem_cache_free(buddy_cache, bud); 288 kmem_cache_free(buddy_cache, bud);
287 } 289 }
@@ -314,10 +316,10 @@ static void gk20a_buddy_allocator_destroy(struct gk20a_allocator *__a)
314 * 316 *
315 * @a must be locked. 317 * @a must be locked.
316 */ 318 */
317static void balloc_coalesce(struct gk20a_buddy_allocator *a, 319static void balloc_coalesce(struct nvgpu_buddy_allocator *a,
318 struct gk20a_buddy *b) 320 struct nvgpu_buddy *b)
319{ 321{
320 struct gk20a_buddy *parent; 322 struct nvgpu_buddy *parent;
321 323
322 if (buddy_is_alloced(b) || buddy_is_split(b)) 324 if (buddy_is_alloced(b) || buddy_is_split(b))
323 return; 325 return;
@@ -355,10 +357,10 @@ static void balloc_coalesce(struct gk20a_buddy_allocator *a,
355 * 357 *
356 * @a must be locked. 358 * @a must be locked.
357 */ 359 */
358static int balloc_split_buddy(struct gk20a_buddy_allocator *a, 360static int balloc_split_buddy(struct nvgpu_buddy_allocator *a,
359 struct gk20a_buddy *b, int pte_size) 361 struct nvgpu_buddy *b, int pte_size)
360{ 362{
361 struct gk20a_buddy *left, *right; 363 struct nvgpu_buddy *left, *right;
362 u64 half; 364 u64 half;
363 365
364 left = balloc_new_buddy(a, b, b->start, b->order - 1); 366 left = balloc_new_buddy(a, b, b->start, b->order - 1);
@@ -403,14 +405,14 @@ static int balloc_split_buddy(struct gk20a_buddy_allocator *a,
403 * 405 *
404 * @a must be locked. 406 * @a must be locked.
405 */ 407 */
406static void balloc_alloc_buddy(struct gk20a_buddy_allocator *a, 408static void balloc_alloc_buddy(struct nvgpu_buddy_allocator *a,
407 struct gk20a_buddy *b) 409 struct nvgpu_buddy *b)
408{ 410{
409 struct rb_node **new = &(a->alloced_buddies.rb_node); 411 struct rb_node **new = &(a->alloced_buddies.rb_node);
410 struct rb_node *parent = NULL; 412 struct rb_node *parent = NULL;
411 413
412 while (*new) { 414 while (*new) {
413 struct gk20a_buddy *bud = container_of(*new, struct gk20a_buddy, 415 struct nvgpu_buddy *bud = container_of(*new, struct nvgpu_buddy,
414 alloced_entry); 416 alloced_entry);
415 417
416 parent = *new; 418 parent = *new;
@@ -435,14 +437,14 @@ static void balloc_alloc_buddy(struct gk20a_buddy_allocator *a,
435 * 437 *
436 * @a must be locked. 438 * @a must be locked.
437 */ 439 */
438static struct gk20a_buddy *balloc_free_buddy(struct gk20a_buddy_allocator *a, 440static struct nvgpu_buddy *balloc_free_buddy(struct nvgpu_buddy_allocator *a,
439 u64 addr) 441 u64 addr)
440{ 442{
441 struct rb_node *node = a->alloced_buddies.rb_node; 443 struct rb_node *node = a->alloced_buddies.rb_node;
442 struct gk20a_buddy *bud; 444 struct nvgpu_buddy *bud;
443 445
444 while (node) { 446 while (node) {
445 bud = container_of(node, struct gk20a_buddy, alloced_entry); 447 bud = container_of(node, struct nvgpu_buddy, alloced_entry);
446 448
447 if (addr < bud->start) 449 if (addr < bud->start)
448 node = node->rb_left; 450 node = node->rb_left;
@@ -465,10 +467,10 @@ static struct gk20a_buddy *balloc_free_buddy(struct gk20a_buddy_allocator *a,
465/* 467/*
466 * Find a suitable buddy for the given order and PTE type (big or little). 468 * Find a suitable buddy for the given order and PTE type (big or little).
467 */ 469 */
468static struct gk20a_buddy *__balloc_find_buddy(struct gk20a_buddy_allocator *a, 470static struct nvgpu_buddy *__balloc_find_buddy(struct nvgpu_buddy_allocator *a,
469 u64 order, int pte_size) 471 u64 order, int pte_size)
470{ 472{
471 struct gk20a_buddy *bud; 473 struct nvgpu_buddy *bud;
472 474
473 if (order > a->max_order || 475 if (order > a->max_order ||
474 list_empty(balloc_get_order_list(a, order))) 476 list_empty(balloc_get_order_list(a, order)))
@@ -477,10 +479,10 @@ static struct gk20a_buddy *__balloc_find_buddy(struct gk20a_buddy_allocator *a,
477 if (a->flags & GPU_ALLOC_GVA_SPACE && 479 if (a->flags & GPU_ALLOC_GVA_SPACE &&
478 pte_size == gmmu_page_size_big) 480 pte_size == gmmu_page_size_big)
479 bud = list_last_entry(balloc_get_order_list(a, order), 481 bud = list_last_entry(balloc_get_order_list(a, order),
480 struct gk20a_buddy, buddy_entry); 482 struct nvgpu_buddy, buddy_entry);
481 else 483 else
482 bud = list_first_entry(balloc_get_order_list(a, order), 484 bud = list_first_entry(balloc_get_order_list(a, order),
483 struct gk20a_buddy, buddy_entry); 485 struct nvgpu_buddy, buddy_entry);
484 486
485 if (bud->pte_size != BALLOC_PTE_SIZE_ANY && 487 if (bud->pte_size != BALLOC_PTE_SIZE_ANY &&
486 bud->pte_size != pte_size) 488 bud->pte_size != pte_size)
@@ -498,11 +500,11 @@ static struct gk20a_buddy *__balloc_find_buddy(struct gk20a_buddy_allocator *a,
498 * 500 *
499 * @a must be locked. 501 * @a must be locked.
500 */ 502 */
501static u64 __balloc_do_alloc(struct gk20a_buddy_allocator *a, 503static u64 __balloc_do_alloc(struct nvgpu_buddy_allocator *a,
502 u64 order, int pte_size) 504 u64 order, int pte_size)
503{ 505{
504 u64 split_order; 506 u64 split_order;
505 struct gk20a_buddy *bud = NULL; 507 struct nvgpu_buddy *bud = NULL;
506 508
507 split_order = order; 509 split_order = order;
508 while (split_order <= a->max_order && 510 while (split_order <= a->max_order &&
@@ -532,17 +534,17 @@ static u64 __balloc_do_alloc(struct gk20a_buddy_allocator *a,
532 * TODO: Right now this uses the unoptimal approach of going through all 534 * TODO: Right now this uses the unoptimal approach of going through all
533 * outstanding allocations and checking their base/ends. This could be better. 535 * outstanding allocations and checking their base/ends. This could be better.
534 */ 536 */
535static int balloc_is_range_free(struct gk20a_buddy_allocator *a, 537static int balloc_is_range_free(struct nvgpu_buddy_allocator *a,
536 u64 base, u64 end) 538 u64 base, u64 end)
537{ 539{
538 struct rb_node *node; 540 struct rb_node *node;
539 struct gk20a_buddy *bud; 541 struct nvgpu_buddy *bud;
540 542
541 node = rb_first(&a->alloced_buddies); 543 node = rb_first(&a->alloced_buddies);
542 if (!node) 544 if (!node)
543 return 1; /* No allocs yet. */ 545 return 1; /* No allocs yet. */
544 546
545 bud = container_of(node, struct gk20a_buddy, alloced_entry); 547 bud = container_of(node, struct nvgpu_buddy, alloced_entry);
546 548
547 while (bud->start < end) { 549 while (bud->start < end) {
548 if ((bud->start > base && bud->start < end) || 550 if ((bud->start > base && bud->start < end) ||
@@ -552,21 +554,21 @@ static int balloc_is_range_free(struct gk20a_buddy_allocator *a,
552 node = rb_next(node); 554 node = rb_next(node);
553 if (!node) 555 if (!node)
554 break; 556 break;
555 bud = container_of(node, struct gk20a_buddy, alloced_entry); 557 bud = container_of(node, struct nvgpu_buddy, alloced_entry);
556 } 558 }
557 559
558 return 1; 560 return 1;
559} 561}
560 562
561static void balloc_alloc_fixed(struct gk20a_buddy_allocator *a, 563static void balloc_alloc_fixed(struct nvgpu_buddy_allocator *a,
562 struct gk20a_fixed_alloc *f) 564 struct nvgpu_fixed_alloc *f)
563{ 565{
564 struct rb_node **new = &(a->fixed_allocs.rb_node); 566 struct rb_node **new = &(a->fixed_allocs.rb_node);
565 struct rb_node *parent = NULL; 567 struct rb_node *parent = NULL;
566 568
567 while (*new) { 569 while (*new) {
568 struct gk20a_fixed_alloc *falloc = 570 struct nvgpu_fixed_alloc *falloc =
569 container_of(*new, struct gk20a_fixed_alloc, 571 container_of(*new, struct nvgpu_fixed_alloc,
570 alloced_entry); 572 alloced_entry);
571 573
572 BUG_ON(!virt_addr_valid(falloc)); 574 BUG_ON(!virt_addr_valid(falloc));
@@ -590,15 +592,15 @@ static void balloc_alloc_fixed(struct gk20a_buddy_allocator *a,
590 * 592 *
591 * @a must be locked. 593 * @a must be locked.
592 */ 594 */
593static struct gk20a_fixed_alloc *balloc_free_fixed( 595static struct nvgpu_fixed_alloc *balloc_free_fixed(
594 struct gk20a_buddy_allocator *a, u64 addr) 596 struct nvgpu_buddy_allocator *a, u64 addr)
595{ 597{
596 struct rb_node *node = a->fixed_allocs.rb_node; 598 struct rb_node *node = a->fixed_allocs.rb_node;
597 struct gk20a_fixed_alloc *falloc; 599 struct nvgpu_fixed_alloc *falloc;
598 600
599 while (node) { 601 while (node) {
600 falloc = container_of(node, 602 falloc = container_of(node,
601 struct gk20a_fixed_alloc, alloced_entry); 603 struct nvgpu_fixed_alloc, alloced_entry);
602 604
603 if (addr < falloc->start) 605 if (addr < falloc->start)
604 node = node->rb_left; 606 node = node->rb_left;
@@ -620,7 +622,7 @@ static struct gk20a_fixed_alloc *balloc_free_fixed(
620 * Find the parent range - doesn't necessarily need the parent to actually exist 622 * Find the parent range - doesn't necessarily need the parent to actually exist
621 * as a buddy. Finding an existing parent comes later... 623 * as a buddy. Finding an existing parent comes later...
622 */ 624 */
623static void __balloc_get_parent_range(struct gk20a_buddy_allocator *a, 625static void __balloc_get_parent_range(struct nvgpu_buddy_allocator *a,
624 u64 base, u64 order, 626 u64 base, u64 order,
625 u64 *pbase, u64 *porder) 627 u64 *pbase, u64 *porder)
626{ 628{
@@ -640,10 +642,10 @@ static void __balloc_get_parent_range(struct gk20a_buddy_allocator *a,
640 * Makes a buddy at the passed address. This will make all parent buddies 642 * Makes a buddy at the passed address. This will make all parent buddies
641 * necessary for this buddy to exist as well. 643 * necessary for this buddy to exist as well.
642 */ 644 */
643static struct gk20a_buddy *__balloc_make_fixed_buddy( 645static struct nvgpu_buddy *__balloc_make_fixed_buddy(
644 struct gk20a_buddy_allocator *a, u64 base, u64 order) 646 struct nvgpu_buddy_allocator *a, u64 base, u64 order)
645{ 647{
646 struct gk20a_buddy *bud = NULL; 648 struct nvgpu_buddy *bud = NULL;
647 struct list_head *order_list; 649 struct list_head *order_list;
648 u64 cur_order = order, cur_base = base; 650 u64 cur_order = order, cur_base = base;
649 651
@@ -696,8 +698,8 @@ static struct gk20a_buddy *__balloc_make_fixed_buddy(
696 return bud; 698 return bud;
697} 699}
698 700
699static u64 __balloc_do_alloc_fixed(struct gk20a_buddy_allocator *a, 701static u64 __balloc_do_alloc_fixed(struct nvgpu_buddy_allocator *a,
700 struct gk20a_fixed_alloc *falloc, 702 struct nvgpu_fixed_alloc *falloc,
701 u64 base, u64 len) 703 u64 base, u64 len)
702{ 704{
703 u64 shifted_base, inc_base; 705 u64 shifted_base, inc_base;
@@ -725,7 +727,7 @@ static u64 __balloc_do_alloc_fixed(struct gk20a_buddy_allocator *a,
725 while (inc_base < (shifted_base + len)) { 727 while (inc_base < (shifted_base + len)) {
726 u64 order_len = balloc_order_to_len(a, align_order); 728 u64 order_len = balloc_order_to_len(a, align_order);
727 u64 remaining; 729 u64 remaining;
728 struct gk20a_buddy *bud; 730 struct nvgpu_buddy *bud;
729 731
730 bud = __balloc_make_fixed_buddy(a, 732 bud = __balloc_make_fixed_buddy(a,
731 balloc_base_unshift(a, inc_base), 733 balloc_base_unshift(a, inc_base),
@@ -757,8 +759,8 @@ static u64 __balloc_do_alloc_fixed(struct gk20a_buddy_allocator *a,
757 759
758err_and_cleanup: 760err_and_cleanup:
759 while (!list_empty(&falloc->buddies)) { 761 while (!list_empty(&falloc->buddies)) {
760 struct gk20a_buddy *bud = list_first_entry(&falloc->buddies, 762 struct nvgpu_buddy *bud = list_first_entry(&falloc->buddies,
761 struct gk20a_buddy, 763 struct nvgpu_buddy,
762 buddy_entry); 764 buddy_entry);
763 765
764 __balloc_buddy_list_rem(a, bud); 766 __balloc_buddy_list_rem(a, bud);
@@ -769,14 +771,14 @@ err_and_cleanup:
769 return 0; 771 return 0;
770} 772}
771 773
772static void __balloc_do_free_fixed(struct gk20a_buddy_allocator *a, 774static void __balloc_do_free_fixed(struct nvgpu_buddy_allocator *a,
773 struct gk20a_fixed_alloc *falloc) 775 struct nvgpu_fixed_alloc *falloc)
774{ 776{
775 struct gk20a_buddy *bud; 777 struct nvgpu_buddy *bud;
776 778
777 while (!list_empty(&falloc->buddies)) { 779 while (!list_empty(&falloc->buddies)) {
778 bud = list_first_entry(&falloc->buddies, 780 bud = list_first_entry(&falloc->buddies,
779 struct gk20a_buddy, 781 struct nvgpu_buddy,
780 buddy_entry); 782 buddy_entry);
781 __balloc_buddy_list_rem(a, bud); 783 __balloc_buddy_list_rem(a, bud);
782 784
@@ -796,13 +798,13 @@ static void __balloc_do_free_fixed(struct gk20a_buddy_allocator *a,
796/* 798/*
797 * Allocate memory from the passed allocator. 799 * Allocate memory from the passed allocator.
798 */ 800 */
799static u64 gk20a_buddy_balloc(struct gk20a_allocator *__a, u64 len) 801static u64 nvgpu_buddy_balloc(struct nvgpu_allocator *__a, u64 len)
800{ 802{
801 u64 order, addr; 803 u64 order, addr;
802 int pte_size; 804 int pte_size;
803 struct gk20a_buddy_allocator *a = __a->priv; 805 struct nvgpu_buddy_allocator *a = __a->priv;
804 806
805 gk20a_alloc_trace_func(); 807 nvgpu_alloc_trace_func();
806 808
807 alloc_lock(__a); 809 alloc_lock(__a);
808 810
@@ -811,7 +813,7 @@ static u64 gk20a_buddy_balloc(struct gk20a_allocator *__a, u64 len)
811 if (order > a->max_order) { 813 if (order > a->max_order) {
812 alloc_unlock(__a); 814 alloc_unlock(__a);
813 alloc_dbg(balloc_owner(a), "Alloc fail\n"); 815 alloc_dbg(balloc_owner(a), "Alloc fail\n");
814 gk20a_alloc_trace_func_done(); 816 nvgpu_alloc_trace_func_done();
815 return 0; 817 return 0;
816 } 818 }
817 819
@@ -848,22 +850,22 @@ static u64 gk20a_buddy_balloc(struct gk20a_allocator *__a, u64 len)
848 850
849 alloc_unlock(__a); 851 alloc_unlock(__a);
850 852
851 gk20a_alloc_trace_func_done(); 853 nvgpu_alloc_trace_func_done();
852 return addr; 854 return addr;
853} 855}
854 856
855/* 857/*
856 * Requires @__a to be locked. 858 * Requires @__a to be locked.
857 */ 859 */
858static u64 __gk20a_balloc_fixed_buddy(struct gk20a_allocator *__a, 860static u64 __nvgpu_balloc_fixed_buddy(struct nvgpu_allocator *__a,
859 u64 base, u64 len) 861 u64 base, u64 len)
860{ 862{
861 u64 ret, real_bytes = 0; 863 u64 ret, real_bytes = 0;
862 struct gk20a_buddy *bud; 864 struct nvgpu_buddy *bud;
863 struct gk20a_fixed_alloc *falloc = NULL; 865 struct nvgpu_fixed_alloc *falloc = NULL;
864 struct gk20a_buddy_allocator *a = __a->priv; 866 struct nvgpu_buddy_allocator *a = __a->priv;
865 867
866 gk20a_alloc_trace_func(); 868 nvgpu_alloc_trace_func();
867 869
868 /* If base isn't aligned to an order 0 block, fail. */ 870 /* If base isn't aligned to an order 0 block, fail. */
869 if (base & (a->blk_size - 1)) 871 if (base & (a->blk_size - 1))
@@ -905,14 +907,14 @@ static u64 __gk20a_balloc_fixed_buddy(struct gk20a_allocator *__a,
905 907
906 alloc_dbg(balloc_owner(a), "Alloc (fixed) 0x%llx\n", base); 908 alloc_dbg(balloc_owner(a), "Alloc (fixed) 0x%llx\n", base);
907 909
908 gk20a_alloc_trace_func_done(); 910 nvgpu_alloc_trace_func_done();
909 return base; 911 return base;
910 912
911fail_unlock: 913fail_unlock:
912 alloc_unlock(__a); 914 alloc_unlock(__a);
913fail: 915fail:
914 kfree(falloc); 916 kfree(falloc);
915 gk20a_alloc_trace_func_done(); 917 nvgpu_alloc_trace_func_done();
916 return 0; 918 return 0;
917} 919}
918 920
@@ -924,14 +926,14 @@ fail:
924 * 926 *
925 * Please do not use this function unless _absolutely_ necessary. 927 * Please do not use this function unless _absolutely_ necessary.
926 */ 928 */
927static u64 gk20a_balloc_fixed_buddy(struct gk20a_allocator *__a, 929static u64 nvgpu_balloc_fixed_buddy(struct nvgpu_allocator *__a,
928 u64 base, u64 len) 930 u64 base, u64 len)
929{ 931{
930 u64 alloc; 932 u64 alloc;
931 struct gk20a_buddy_allocator *a = __a->priv; 933 struct nvgpu_buddy_allocator *a = __a->priv;
932 934
933 alloc_lock(__a); 935 alloc_lock(__a);
934 alloc = __gk20a_balloc_fixed_buddy(__a, base, len); 936 alloc = __nvgpu_balloc_fixed_buddy(__a, base, len);
935 a->alloc_made = 1; 937 a->alloc_made = 1;
936 alloc_unlock(__a); 938 alloc_unlock(__a);
937 939
@@ -941,16 +943,16 @@ static u64 gk20a_balloc_fixed_buddy(struct gk20a_allocator *__a,
941/* 943/*
942 * Free the passed allocation. 944 * Free the passed allocation.
943 */ 945 */
944static void gk20a_buddy_bfree(struct gk20a_allocator *__a, u64 addr) 946static void nvgpu_buddy_bfree(struct nvgpu_allocator *__a, u64 addr)
945{ 947{
946 struct gk20a_buddy *bud; 948 struct nvgpu_buddy *bud;
947 struct gk20a_fixed_alloc *falloc; 949 struct nvgpu_fixed_alloc *falloc;
948 struct gk20a_buddy_allocator *a = __a->priv; 950 struct nvgpu_buddy_allocator *a = __a->priv;
949 951
950 gk20a_alloc_trace_func(); 952 nvgpu_alloc_trace_func();
951 953
952 if (!addr) { 954 if (!addr) {
953 gk20a_alloc_trace_func_done(); 955 nvgpu_alloc_trace_func_done();
954 return; 956 return;
955 } 957 }
956 958
@@ -981,14 +983,14 @@ static void gk20a_buddy_bfree(struct gk20a_allocator *__a, u64 addr)
981done: 983done:
982 alloc_unlock(__a); 984 alloc_unlock(__a);
983 alloc_dbg(balloc_owner(a), "Free 0x%llx\n", addr); 985 alloc_dbg(balloc_owner(a), "Free 0x%llx\n", addr);
984 gk20a_alloc_trace_func_done(); 986 nvgpu_alloc_trace_func_done();
985 return; 987 return;
986} 988}
987 989
988static bool gk20a_buddy_reserve_is_possible(struct gk20a_buddy_allocator *a, 990static bool nvgpu_buddy_reserve_is_possible(struct nvgpu_buddy_allocator *a,
989 struct gk20a_alloc_carveout *co) 991 struct nvgpu_alloc_carveout *co)
990{ 992{
991 struct gk20a_alloc_carveout *tmp; 993 struct nvgpu_alloc_carveout *tmp;
992 u64 co_base, co_end; 994 u64 co_base, co_end;
993 995
994 co_base = co->base; 996 co_base = co->base;
@@ -1013,10 +1015,10 @@ static bool gk20a_buddy_reserve_is_possible(struct gk20a_buddy_allocator *a,
1013 * Carveouts can only be reserved before any regular allocations have been 1015 * Carveouts can only be reserved before any regular allocations have been
1014 * made. 1016 * made.
1015 */ 1017 */
1016static int gk20a_buddy_reserve_co(struct gk20a_allocator *__a, 1018static int nvgpu_buddy_reserve_co(struct nvgpu_allocator *__a,
1017 struct gk20a_alloc_carveout *co) 1019 struct nvgpu_alloc_carveout *co)
1018{ 1020{
1019 struct gk20a_buddy_allocator *a = __a->priv; 1021 struct nvgpu_buddy_allocator *a = __a->priv;
1020 u64 addr; 1022 u64 addr;
1021 int err = 0; 1023 int err = 0;
1022 1024
@@ -1026,13 +1028,13 @@ static int gk20a_buddy_reserve_co(struct gk20a_allocator *__a,
1026 1028
1027 alloc_lock(__a); 1029 alloc_lock(__a);
1028 1030
1029 if (!gk20a_buddy_reserve_is_possible(a, co)) { 1031 if (!nvgpu_buddy_reserve_is_possible(a, co)) {
1030 err = -EBUSY; 1032 err = -EBUSY;
1031 goto done; 1033 goto done;
1032 } 1034 }
1033 1035
1034 /* Should not be possible to fail... */ 1036 /* Should not be possible to fail... */
1035 addr = __gk20a_balloc_fixed_buddy(__a, co->base, co->length); 1037 addr = __nvgpu_balloc_fixed_buddy(__a, co->base, co->length);
1036 if (!addr) { 1038 if (!addr) {
1037 err = -ENOMEM; 1039 err = -ENOMEM;
1038 pr_warn("%s: Failed to reserve a valid carveout!\n", __func__); 1040 pr_warn("%s: Failed to reserve a valid carveout!\n", __func__);
@@ -1049,50 +1051,50 @@ done:
1049/* 1051/*
1050 * Carveouts can be release at any time. 1052 * Carveouts can be release at any time.
1051 */ 1053 */
1052static void gk20a_buddy_release_co(struct gk20a_allocator *__a, 1054static void nvgpu_buddy_release_co(struct nvgpu_allocator *__a,
1053 struct gk20a_alloc_carveout *co) 1055 struct nvgpu_alloc_carveout *co)
1054{ 1056{
1055 alloc_lock(__a); 1057 alloc_lock(__a);
1056 1058
1057 list_del_init(&co->co_entry); 1059 list_del_init(&co->co_entry);
1058 gk20a_free(__a, co->base); 1060 nvgpu_free(__a, co->base);
1059 1061
1060 alloc_unlock(__a); 1062 alloc_unlock(__a);
1061} 1063}
1062 1064
1063static u64 gk20a_buddy_alloc_length(struct gk20a_allocator *a) 1065static u64 nvgpu_buddy_alloc_length(struct nvgpu_allocator *a)
1064{ 1066{
1065 struct gk20a_buddy_allocator *ba = a->priv; 1067 struct nvgpu_buddy_allocator *ba = a->priv;
1066 1068
1067 return ba->length; 1069 return ba->length;
1068} 1070}
1069 1071
1070static u64 gk20a_buddy_alloc_base(struct gk20a_allocator *a) 1072static u64 nvgpu_buddy_alloc_base(struct nvgpu_allocator *a)
1071{ 1073{
1072 struct gk20a_buddy_allocator *ba = a->priv; 1074 struct nvgpu_buddy_allocator *ba = a->priv;
1073 1075
1074 return ba->start; 1076 return ba->start;
1075} 1077}
1076 1078
1077static int gk20a_buddy_alloc_inited(struct gk20a_allocator *a) 1079static int nvgpu_buddy_alloc_inited(struct nvgpu_allocator *a)
1078{ 1080{
1079 struct gk20a_buddy_allocator *ba = a->priv; 1081 struct nvgpu_buddy_allocator *ba = a->priv;
1080 int inited = ba->initialized; 1082 int inited = ba->initialized;
1081 1083
1082 rmb(); 1084 rmb();
1083 return inited; 1085 return inited;
1084} 1086}
1085 1087
1086static u64 gk20a_buddy_alloc_end(struct gk20a_allocator *a) 1088static u64 nvgpu_buddy_alloc_end(struct nvgpu_allocator *a)
1087{ 1089{
1088 struct gk20a_buddy_allocator *ba = a->priv; 1090 struct nvgpu_buddy_allocator *ba = a->priv;
1089 1091
1090 return ba->end; 1092 return ba->end;
1091} 1093}
1092 1094
1093static u64 gk20a_buddy_alloc_space(struct gk20a_allocator *a) 1095static u64 nvgpu_buddy_alloc_space(struct nvgpu_allocator *a)
1094{ 1096{
1095 struct gk20a_buddy_allocator *ba = a->priv; 1097 struct nvgpu_buddy_allocator *ba = a->priv;
1096 u64 space; 1098 u64 space;
1097 1099
1098 alloc_lock(a); 1100 alloc_lock(a);
@@ -1108,14 +1110,14 @@ static u64 gk20a_buddy_alloc_space(struct gk20a_allocator *a)
1108 * stats are printed to the kernel log. This lets this code be used for 1110 * stats are printed to the kernel log. This lets this code be used for
1109 * debugging purposes internal to the allocator. 1111 * debugging purposes internal to the allocator.
1110 */ 1112 */
1111static void gk20a_buddy_print_stats(struct gk20a_allocator *__a, 1113static void nvgpu_buddy_print_stats(struct nvgpu_allocator *__a,
1112 struct seq_file *s, int lock) 1114 struct seq_file *s, int lock)
1113{ 1115{
1114 int i = 0; 1116 int i = 0;
1115 struct rb_node *node; 1117 struct rb_node *node;
1116 struct gk20a_fixed_alloc *falloc; 1118 struct nvgpu_fixed_alloc *falloc;
1117 struct gk20a_alloc_carveout *tmp; 1119 struct nvgpu_alloc_carveout *tmp;
1118 struct gk20a_buddy_allocator *a = __a->priv; 1120 struct nvgpu_buddy_allocator *a = __a->priv;
1119 1121
1120 __alloc_pstat(s, __a, "base = %llu, limit = %llu, blk_size = %llu\n", 1122 __alloc_pstat(s, __a, "base = %llu, limit = %llu, blk_size = %llu\n",
1121 a->base, a->length, a->blk_size); 1123 a->base, a->length, a->blk_size);
@@ -1161,7 +1163,7 @@ static void gk20a_buddy_print_stats(struct gk20a_allocator *__a,
1161 node != NULL; 1163 node != NULL;
1162 node = rb_next(node)) { 1164 node = rb_next(node)) {
1163 falloc = container_of(node, 1165 falloc = container_of(node,
1164 struct gk20a_fixed_alloc, alloced_entry); 1166 struct nvgpu_fixed_alloc, alloced_entry);
1165 1167
1166 __alloc_pstat(s, __a, "Fixed alloc (%d): [0x%llx -> 0x%llx]\n", 1168 __alloc_pstat(s, __a, "Fixed alloc (%d): [0x%llx -> 0x%llx]\n",
1167 i, falloc->start, falloc->end); 1169 i, falloc->start, falloc->end);
@@ -1179,25 +1181,25 @@ static void gk20a_buddy_print_stats(struct gk20a_allocator *__a,
1179 alloc_unlock(__a); 1181 alloc_unlock(__a);
1180} 1182}
1181 1183
1182static const struct gk20a_allocator_ops buddy_ops = { 1184static const struct nvgpu_allocator_ops buddy_ops = {
1183 .alloc = gk20a_buddy_balloc, 1185 .alloc = nvgpu_buddy_balloc,
1184 .free = gk20a_buddy_bfree, 1186 .free = nvgpu_buddy_bfree,
1185 1187
1186 .alloc_fixed = gk20a_balloc_fixed_buddy, 1188 .alloc_fixed = nvgpu_balloc_fixed_buddy,
1187 /* .free_fixed not needed. */ 1189 /* .free_fixed not needed. */
1188 1190
1189 .reserve_carveout = gk20a_buddy_reserve_co, 1191 .reserve_carveout = nvgpu_buddy_reserve_co,
1190 .release_carveout = gk20a_buddy_release_co, 1192 .release_carveout = nvgpu_buddy_release_co,
1191 1193
1192 .base = gk20a_buddy_alloc_base, 1194 .base = nvgpu_buddy_alloc_base,
1193 .length = gk20a_buddy_alloc_length, 1195 .length = nvgpu_buddy_alloc_length,
1194 .end = gk20a_buddy_alloc_end, 1196 .end = nvgpu_buddy_alloc_end,
1195 .inited = gk20a_buddy_alloc_inited, 1197 .inited = nvgpu_buddy_alloc_inited,
1196 .space = gk20a_buddy_alloc_space, 1198 .space = nvgpu_buddy_alloc_space,
1197 1199
1198 .fini = gk20a_buddy_allocator_destroy, 1200 .fini = nvgpu_buddy_allocator_destroy,
1199 1201
1200 .print_stats = gk20a_buddy_print_stats, 1202 .print_stats = nvgpu_buddy_print_stats,
1201}; 1203};
1202 1204
1203/* 1205/*
@@ -1218,14 +1220,14 @@ static const struct gk20a_allocator_ops buddy_ops = {
1218 * will try and pick a reasonable max order. 1220 * will try and pick a reasonable max order.
1219 * @flags: Extra flags necessary. See GPU_BALLOC_*. 1221 * @flags: Extra flags necessary. See GPU_BALLOC_*.
1220 */ 1222 */
1221int __gk20a_buddy_allocator_init(struct gk20a *g, struct gk20a_allocator *__a, 1223int __nvgpu_buddy_allocator_init(struct gk20a *g, struct nvgpu_allocator *__a,
1222 struct vm_gk20a *vm, const char *name, 1224 struct vm_gk20a *vm, const char *name,
1223 u64 base, u64 size, u64 blk_size, 1225 u64 base, u64 size, u64 blk_size,
1224 u64 max_order, u64 flags) 1226 u64 max_order, u64 flags)
1225{ 1227{
1226 int err; 1228 int err;
1227 u64 pde_size; 1229 u64 pde_size;
1228 struct gk20a_buddy_allocator *a; 1230 struct nvgpu_buddy_allocator *a;
1229 1231
1230 /* blk_size must be greater than 0 and a power of 2. */ 1232 /* blk_size must be greater than 0 and a power of 2. */
1231 if (blk_size == 0) 1233 if (blk_size == 0)
@@ -1240,11 +1242,11 @@ int __gk20a_buddy_allocator_init(struct gk20a *g, struct gk20a_allocator *__a,
1240 if (flags & GPU_ALLOC_GVA_SPACE && !vm) 1242 if (flags & GPU_ALLOC_GVA_SPACE && !vm)
1241 return -EINVAL; 1243 return -EINVAL;
1242 1244
1243 a = kzalloc(sizeof(struct gk20a_buddy_allocator), GFP_KERNEL); 1245 a = kzalloc(sizeof(struct nvgpu_buddy_allocator), GFP_KERNEL);
1244 if (!a) 1246 if (!a)
1245 return -ENOMEM; 1247 return -ENOMEM;
1246 1248
1247 err = __gk20a_alloc_common_init(__a, name, a, false, &buddy_ops); 1249 err = __nvgpu_alloc_common_init(__a, name, a, false, &buddy_ops);
1248 if (err) 1250 if (err)
1249 goto fail; 1251 goto fail;
1250 1252
@@ -1287,7 +1289,7 @@ int __gk20a_buddy_allocator_init(struct gk20a *g, struct gk20a_allocator *__a,
1287 1289
1288 /* Shared buddy kmem_cache for all allocators. */ 1290 /* Shared buddy kmem_cache for all allocators. */
1289 if (!buddy_cache) 1291 if (!buddy_cache)
1290 buddy_cache = KMEM_CACHE(gk20a_buddy, 0); 1292 buddy_cache = KMEM_CACHE(nvgpu_buddy, 0);
1291 if (!buddy_cache) { 1293 if (!buddy_cache) {
1292 err = -ENOMEM; 1294 err = -ENOMEM;
1293 goto fail; 1295 goto fail;
@@ -1303,7 +1305,7 @@ int __gk20a_buddy_allocator_init(struct gk20a *g, struct gk20a_allocator *__a,
1303 wmb(); 1305 wmb();
1304 a->initialized = 1; 1306 a->initialized = 1;
1305 1307
1306 gk20a_init_alloc_debug(g, __a); 1308 nvgpu_init_alloc_debug(g, __a);
1307 alloc_dbg(__a, "New allocator: type buddy\n"); 1309 alloc_dbg(__a, "New allocator: type buddy\n");
1308 alloc_dbg(__a, " base 0x%llx\n", a->base); 1310 alloc_dbg(__a, " base 0x%llx\n", a->base);
1309 alloc_dbg(__a, " size 0x%llx\n", a->length); 1311 alloc_dbg(__a, " size 0x%llx\n", a->length);
@@ -1318,10 +1320,10 @@ fail:
1318 return err; 1320 return err;
1319} 1321}
1320 1322
1321int gk20a_buddy_allocator_init(struct gk20a *g, struct gk20a_allocator *a, 1323int nvgpu_buddy_allocator_init(struct gk20a *g, struct nvgpu_allocator *a,
1322 const char *name, u64 base, u64 size, 1324 const char *name, u64 base, u64 size,
1323 u64 blk_size, u64 flags) 1325 u64 blk_size, u64 flags)
1324{ 1326{
1325 return __gk20a_buddy_allocator_init(g, a, NULL, name, 1327 return __nvgpu_buddy_allocator_init(g, a, NULL, name,
1326 base, size, blk_size, 0, 0); 1328 base, size, blk_size, 0, 0);
1327} 1329}
diff --git a/drivers/gpu/nvgpu/gk20a/buddy_allocator_priv.h b/drivers/gpu/nvgpu/common/mm/buddy_allocator_priv.h
index bb8b307b..50a11f14 100644
--- a/drivers/gpu/nvgpu/gk20a/buddy_allocator_priv.h
+++ b/drivers/gpu/nvgpu/common/mm/buddy_allocator_priv.h
@@ -20,17 +20,17 @@
20#include <linux/list.h> 20#include <linux/list.h>
21#include <linux/rbtree.h> 21#include <linux/rbtree.h>
22 22
23struct gk20a_allocator; 23struct nvgpu_allocator;
24struct vm_gk20a; 24struct vm_gk20a;
25 25
26/* 26/*
27 * Each buddy is an element in a binary tree. 27 * Each buddy is an element in a binary tree.
28 */ 28 */
29struct gk20a_buddy { 29struct nvgpu_buddy {
30 struct gk20a_buddy *parent; /* Parent node. */ 30 struct nvgpu_buddy *parent; /* Parent node. */
31 struct gk20a_buddy *buddy; /* This node's buddy. */ 31 struct nvgpu_buddy *buddy; /* This node's buddy. */
32 struct gk20a_buddy *left; /* Lower address sub-node. */ 32 struct nvgpu_buddy *left; /* Lower address sub-node. */
33 struct gk20a_buddy *right; /* Higher address sub-node. */ 33 struct nvgpu_buddy *right; /* Higher address sub-node. */
34 34
35 struct list_head buddy_entry; /* List entry for various lists. */ 35 struct list_head buddy_entry; /* List entry for various lists. */
36 struct rb_node alloced_entry; /* RB tree of allocations. */ 36 struct rb_node alloced_entry; /* RB tree of allocations. */
@@ -54,31 +54,31 @@ struct gk20a_buddy {
54}; 54};
55 55
56#define __buddy_flag_ops(flag, flag_up) \ 56#define __buddy_flag_ops(flag, flag_up) \
57 static inline int buddy_is_ ## flag(struct gk20a_buddy *b) \ 57 static inline int buddy_is_ ## flag(struct nvgpu_buddy *b) \
58 { \ 58 { \
59 return b->flags & BALLOC_BUDDY_ ## flag_up; \ 59 return b->flags & BALLOC_BUDDY_ ## flag_up; \
60 } \ 60 } \
61 static inline void buddy_set_ ## flag(struct gk20a_buddy *b) \ 61 static inline void buddy_set_ ## flag(struct nvgpu_buddy *b) \
62 { \ 62 { \
63 b->flags |= BALLOC_BUDDY_ ## flag_up; \ 63 b->flags |= BALLOC_BUDDY_ ## flag_up; \
64 } \ 64 } \
65 static inline void buddy_clr_ ## flag(struct gk20a_buddy *b) \ 65 static inline void buddy_clr_ ## flag(struct nvgpu_buddy *b) \
66 { \ 66 { \
67 b->flags &= ~BALLOC_BUDDY_ ## flag_up; \ 67 b->flags &= ~BALLOC_BUDDY_ ## flag_up; \
68 } 68 }
69 69
70/* 70/*
71 * int buddy_is_alloced(struct gk20a_buddy *b); 71 * int buddy_is_alloced(struct nvgpu_buddy *b);
72 * void buddy_set_alloced(struct gk20a_buddy *b); 72 * void buddy_set_alloced(struct nvgpu_buddy *b);
73 * void buddy_clr_alloced(struct gk20a_buddy *b); 73 * void buddy_clr_alloced(struct nvgpu_buddy *b);
74 * 74 *
75 * int buddy_is_split(struct gk20a_buddy *b); 75 * int buddy_is_split(struct nvgpu_buddy *b);
76 * void buddy_set_split(struct gk20a_buddy *b); 76 * void buddy_set_split(struct nvgpu_buddy *b);
77 * void buddy_clr_split(struct gk20a_buddy *b); 77 * void buddy_clr_split(struct nvgpu_buddy *b);
78 * 78 *
79 * int buddy_is_in_list(struct gk20a_buddy *b); 79 * int buddy_is_in_list(struct nvgpu_buddy *b);
80 * void buddy_set_in_list(struct gk20a_buddy *b); 80 * void buddy_set_in_list(struct nvgpu_buddy *b);
81 * void buddy_clr_in_list(struct gk20a_buddy *b); 81 * void buddy_clr_in_list(struct nvgpu_buddy *b);
82 */ 82 */
83__buddy_flag_ops(alloced, ALLOCED); 83__buddy_flag_ops(alloced, ALLOCED);
84__buddy_flag_ops(split, SPLIT); 84__buddy_flag_ops(split, SPLIT);
@@ -87,7 +87,7 @@ __buddy_flag_ops(in_list, IN_LIST);
87/* 87/*
88 * Keeps info for a fixed allocation. 88 * Keeps info for a fixed allocation.
89 */ 89 */
90struct gk20a_fixed_alloc { 90struct nvgpu_fixed_alloc {
91 struct list_head buddies; /* List of buddies. */ 91 struct list_head buddies; /* List of buddies. */
92 struct rb_node alloced_entry; /* RB tree of fixed allocations. */ 92 struct rb_node alloced_entry; /* RB tree of fixed allocations. */
93 93
@@ -105,8 +105,8 @@ struct gk20a_fixed_alloc {
105 * 105 *
106 * order_size is the size of an order 0 buddy. 106 * order_size is the size of an order 0 buddy.
107 */ 107 */
108struct gk20a_buddy_allocator { 108struct nvgpu_buddy_allocator {
109 struct gk20a_allocator *owner; /* Owner of this buddy allocator. */ 109 struct nvgpu_allocator *owner; /* Owner of this buddy allocator. */
110 struct vm_gk20a *vm; /* Parent VM - can be NULL. */ 110 struct vm_gk20a *vm; /* Parent VM - can be NULL. */
111 111
112 u64 base; /* Base address of the space. */ 112 u64 base; /* Base address of the space. */
@@ -153,38 +153,38 @@ struct gk20a_buddy_allocator {
153 u64 bytes_freed; 153 u64 bytes_freed;
154}; 154};
155 155
156static inline struct gk20a_buddy_allocator *buddy_allocator( 156static inline struct nvgpu_buddy_allocator *buddy_allocator(
157 struct gk20a_allocator *a) 157 struct nvgpu_allocator *a)
158{ 158{
159 return (struct gk20a_buddy_allocator *)(a)->priv; 159 return (struct nvgpu_buddy_allocator *)(a)->priv;
160} 160}
161 161
162static inline struct list_head *balloc_get_order_list( 162static inline struct list_head *balloc_get_order_list(
163 struct gk20a_buddy_allocator *a, int order) 163 struct nvgpu_buddy_allocator *a, int order)
164{ 164{
165 return &a->buddy_list[order]; 165 return &a->buddy_list[order];
166} 166}
167 167
168static inline u64 balloc_order_to_len(struct gk20a_buddy_allocator *a, 168static inline u64 balloc_order_to_len(struct nvgpu_buddy_allocator *a,
169 int order) 169 int order)
170{ 170{
171 return (1 << order) * a->blk_size; 171 return (1 << order) * a->blk_size;
172} 172}
173 173
174static inline u64 balloc_base_shift(struct gk20a_buddy_allocator *a, 174static inline u64 balloc_base_shift(struct nvgpu_buddy_allocator *a,
175 u64 base) 175 u64 base)
176{ 176{
177 return base - a->start; 177 return base - a->start;
178} 178}
179 179
180static inline u64 balloc_base_unshift(struct gk20a_buddy_allocator *a, 180static inline u64 balloc_base_unshift(struct nvgpu_buddy_allocator *a,
181 u64 base) 181 u64 base)
182{ 182{
183 return base + a->start; 183 return base + a->start;
184} 184}
185 185
186static inline struct gk20a_allocator *balloc_owner( 186static inline struct nvgpu_allocator *balloc_owner(
187 struct gk20a_buddy_allocator *a) 187 struct nvgpu_buddy_allocator *a)
188{ 188{
189 return a->owner; 189 return a->owner;
190} 190}
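The comment block in buddy_allocator_priv.h above lists the accessors that __buddy_flag_ops() generates. For readability, this is what the invocation __buddy_flag_ops(alloced, ALLOCED) expands to once the line continuations are resolved:

static inline int buddy_is_alloced(struct nvgpu_buddy *b)
{
	return b->flags & BALLOC_BUDDY_ALLOCED;
}

static inline void buddy_set_alloced(struct nvgpu_buddy *b)
{
	b->flags |= BALLOC_BUDDY_ALLOCED;
}

static inline void buddy_clr_alloced(struct nvgpu_buddy *b)
{
	b->flags &= ~BALLOC_BUDDY_ALLOCED;
}

The split and in_list variants expand the same way against BALLOC_BUDDY_SPLIT and BALLOC_BUDDY_IN_LIST.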
diff --git a/drivers/gpu/nvgpu/gk20a/gk20a_allocator_lockless.c b/drivers/gpu/nvgpu/common/mm/lockless_allocator.c
index 5b011d8c..e3063a42 100644
--- a/drivers/gpu/nvgpu/gk20a/gk20a_allocator_lockless.c
+++ b/drivers/gpu/nvgpu/common/mm/lockless_allocator.c
@@ -19,42 +19,43 @@
19#include <linux/vmalloc.h> 19#include <linux/vmalloc.h>
20#include <linux/atomic.h> 20#include <linux/atomic.h>
21 21
22#include "gk20a_allocator.h" 22#include <nvgpu/allocator.h>
23
23#include "lockless_allocator_priv.h" 24#include "lockless_allocator_priv.h"
24 25
25static u64 gk20a_lockless_alloc_length(struct gk20a_allocator *a) 26static u64 nvgpu_lockless_alloc_length(struct nvgpu_allocator *a)
26{ 27{
27 struct gk20a_lockless_allocator *pa = a->priv; 28 struct nvgpu_lockless_allocator *pa = a->priv;
28 29
29 return pa->length; 30 return pa->length;
30} 31}
31 32
32static u64 gk20a_lockless_alloc_base(struct gk20a_allocator *a) 33static u64 nvgpu_lockless_alloc_base(struct nvgpu_allocator *a)
33{ 34{
34 struct gk20a_lockless_allocator *pa = a->priv; 35 struct nvgpu_lockless_allocator *pa = a->priv;
35 36
36 return pa->base; 37 return pa->base;
37} 38}
38 39
39static int gk20a_lockless_alloc_inited(struct gk20a_allocator *a) 40static int nvgpu_lockless_alloc_inited(struct nvgpu_allocator *a)
40{ 41{
41 struct gk20a_lockless_allocator *pa = a->priv; 42 struct nvgpu_lockless_allocator *pa = a->priv;
42 int inited = pa->inited; 43 int inited = pa->inited;
43 44
44 rmb(); 45 rmb();
45 return inited; 46 return inited;
46} 47}
47 48
48static u64 gk20a_lockless_alloc_end(struct gk20a_allocator *a) 49static u64 nvgpu_lockless_alloc_end(struct nvgpu_allocator *a)
49{ 50{
50 struct gk20a_lockless_allocator *pa = a->priv; 51 struct nvgpu_lockless_allocator *pa = a->priv;
51 52
52 return pa->base + pa->length; 53 return pa->base + pa->length;
53} 54}
54 55
55static u64 gk20a_lockless_alloc(struct gk20a_allocator *a, u64 len) 56static u64 nvgpu_lockless_alloc(struct nvgpu_allocator *a, u64 len)
56{ 57{
57 struct gk20a_lockless_allocator *pa = a->priv; 58 struct nvgpu_lockless_allocator *pa = a->priv;
58 int head, new_head, ret; 59 int head, new_head, ret;
59 u64 addr = 0; 60 u64 addr = 0;
60 61
@@ -77,9 +78,9 @@ static u64 gk20a_lockless_alloc(struct gk20a_allocator *a, u64 len)
77 return addr; 78 return addr;
78} 79}
79 80
80static void gk20a_lockless_free(struct gk20a_allocator *a, u64 addr) 81static void nvgpu_lockless_free(struct nvgpu_allocator *a, u64 addr)
81{ 82{
82 struct gk20a_lockless_allocator *pa = a->priv; 83 struct nvgpu_lockless_allocator *pa = a->priv;
83 int head, ret; 84 int head, ret;
84 u64 cur_idx, rem; 85 u64 cur_idx, rem;
85 86
@@ -98,20 +99,20 @@ static void gk20a_lockless_free(struct gk20a_allocator *a, u64 addr)
98 } 99 }
99} 100}
100 101
101static void gk20a_lockless_alloc_destroy(struct gk20a_allocator *a) 102static void nvgpu_lockless_alloc_destroy(struct nvgpu_allocator *a)
102{ 103{
103 struct gk20a_lockless_allocator *pa = a->priv; 104 struct nvgpu_lockless_allocator *pa = a->priv;
104 105
105 gk20a_fini_alloc_debug(a); 106 nvgpu_fini_alloc_debug(a);
106 107
107 vfree(pa->next); 108 vfree(pa->next);
108 kfree(pa); 109 kfree(pa);
109} 110}
110 111
111static void gk20a_lockless_print_stats(struct gk20a_allocator *a, 112static void nvgpu_lockless_print_stats(struct nvgpu_allocator *a,
112 struct seq_file *s, int lock) 113 struct seq_file *s, int lock)
113{ 114{
114 struct gk20a_lockless_allocator *pa = a->priv; 115 struct nvgpu_lockless_allocator *pa = a->priv;
115 116
116 __alloc_pstat(s, a, "Lockless allocator params:\n"); 117 __alloc_pstat(s, a, "Lockless allocator params:\n");
117 __alloc_pstat(s, a, " start = 0x%llx\n", pa->base); 118 __alloc_pstat(s, a, " start = 0x%llx\n", pa->base);
@@ -125,21 +126,21 @@ static void gk20a_lockless_print_stats(struct gk20a_allocator *a,
125 pa->nr_nodes - atomic_read(&pa->nr_allocs)); 126 pa->nr_nodes - atomic_read(&pa->nr_allocs));
126} 127}
127 128
128static const struct gk20a_allocator_ops pool_ops = { 129static const struct nvgpu_allocator_ops pool_ops = {
129 .alloc = gk20a_lockless_alloc, 130 .alloc = nvgpu_lockless_alloc,
130 .free = gk20a_lockless_free, 131 .free = nvgpu_lockless_free,
131 132
132 .base = gk20a_lockless_alloc_base, 133 .base = nvgpu_lockless_alloc_base,
133 .length = gk20a_lockless_alloc_length, 134 .length = nvgpu_lockless_alloc_length,
134 .end = gk20a_lockless_alloc_end, 135 .end = nvgpu_lockless_alloc_end,
135 .inited = gk20a_lockless_alloc_inited, 136 .inited = nvgpu_lockless_alloc_inited,
136 137
137 .fini = gk20a_lockless_alloc_destroy, 138 .fini = nvgpu_lockless_alloc_destroy,
138 139
139 .print_stats = gk20a_lockless_print_stats, 140 .print_stats = nvgpu_lockless_print_stats,
140}; 141};
141 142
142int gk20a_lockless_allocator_init(struct gk20a *g, struct gk20a_allocator *__a, 143int nvgpu_lockless_allocator_init(struct gk20a *g, struct nvgpu_allocator *__a,
143 const char *name, u64 base, u64 length, 144 const char *name, u64 base, u64 length,
144 u64 blk_size, u64 flags) 145 u64 blk_size, u64 flags)
145{ 146{
@@ -147,7 +148,7 @@ int gk20a_lockless_allocator_init(struct gk20a *g, struct gk20a_allocator *__a,
147 int err; 148 int err;
148 int nr_nodes; 149 int nr_nodes;
149 u64 count, rem; 150 u64 count, rem;
150 struct gk20a_lockless_allocator *a; 151 struct nvgpu_lockless_allocator *a;
151 152
152 if (!blk_size) 153 if (!blk_size)
153 return -EINVAL; 154 return -EINVAL;
@@ -161,11 +162,11 @@ int gk20a_lockless_allocator_init(struct gk20a *g, struct gk20a_allocator *__a,
161 if (!base || !count || count > INT_MAX) 162 if (!base || !count || count > INT_MAX)
162 return -EINVAL; 163 return -EINVAL;
163 164
164 a = kzalloc(sizeof(struct gk20a_lockless_allocator), GFP_KERNEL); 165 a = kzalloc(sizeof(struct nvgpu_lockless_allocator), GFP_KERNEL);
165 if (!a) 166 if (!a)
166 return -ENOMEM; 167 return -ENOMEM;
167 168
168 err = __gk20a_alloc_common_init(__a, name, a, false, &pool_ops); 169 err = __nvgpu_alloc_common_init(__a, name, a, false, &pool_ops);
169 if (err) 170 if (err)
170 goto fail; 171 goto fail;
171 172
@@ -191,7 +192,7 @@ int gk20a_lockless_allocator_init(struct gk20a *g, struct gk20a_allocator *__a,
191 wmb(); 192 wmb();
192 a->inited = true; 193 a->inited = true;
193 194
194 gk20a_init_alloc_debug(g, __a); 195 nvgpu_init_alloc_debug(g, __a);
195 alloc_dbg(__a, "New allocator: type lockless\n"); 196 alloc_dbg(__a, "New allocator: type lockless\n");
196 alloc_dbg(__a, " base 0x%llx\n", a->base); 197 alloc_dbg(__a, " base 0x%llx\n", a->base);
197 alloc_dbg(__a, " nodes %d\n", a->nr_nodes); 198 alloc_dbg(__a, " nodes %d\n", a->nr_nodes);
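The lockless allocator hands out fixed-size slots from a pre-sized region, which is the pattern the fence pool further down relies on. A hedged sketch of that pattern, with an invented object type and pool size:

#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <nvgpu/allocator.h>

struct my_obj { u64 payload; };	/* invented object type for the sketch */

static int example_lockless_pool(struct gk20a *g, struct nvgpu_allocator *a)
{
	u64 size = PAGE_ALIGN(128 * sizeof(struct my_obj));
	void *pool = vzalloc(size);
	u64 addr;
	int err;

	if (!pool)
		return -ENOMEM;

	err = nvgpu_lockless_allocator_init(g, a, "my_obj_pool",
					    (u64)(uintptr_t)pool, size,
					    sizeof(struct my_obj), 0);
	if (err) {
		vfree(pool);
		return err;
	}

	addr = nvgpu_alloc(a, sizeof(struct my_obj));	/* grab one slot */
	if (addr)
		nvgpu_free(a, addr);

	/* Teardown order: destroy the allocator, then free the backing store. */
	nvgpu_alloc_destroy(a);
	vfree(pool);
	return 0;
}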
diff --git a/drivers/gpu/nvgpu/gk20a/lockless_allocator_priv.h b/drivers/gpu/nvgpu/common/mm/lockless_allocator_priv.h
index f9b03e0e..32421ac1 100644
--- a/drivers/gpu/nvgpu/gk20a/lockless_allocator_priv.h
+++ b/drivers/gpu/nvgpu/common/mm/lockless_allocator_priv.h
@@ -91,10 +91,10 @@
91#ifndef LOCKLESS_ALLOCATOR_PRIV_H 91#ifndef LOCKLESS_ALLOCATOR_PRIV_H
92#define LOCKLESS_ALLOCATOR_PRIV_H 92#define LOCKLESS_ALLOCATOR_PRIV_H
93 93
94struct gk20a_allocator; 94struct nvgpu_allocator;
95 95
96struct gk20a_lockless_allocator { 96struct nvgpu_lockless_allocator {
97 struct gk20a_allocator *owner; 97 struct nvgpu_allocator *owner;
98 98
99 u64 base; /* Base address of the space. */ 99 u64 base; /* Base address of the space. */
100 u64 length; /* Length of the space. */ 100 u64 length; /* Length of the space. */
@@ -112,10 +112,10 @@ struct gk20a_lockless_allocator {
112 atomic_t nr_allocs; 112 atomic_t nr_allocs;
113}; 113};
114 114
115static inline struct gk20a_lockless_allocator *lockless_allocator( 115static inline struct nvgpu_lockless_allocator *lockless_allocator(
116 struct gk20a_allocator *a) 116 struct nvgpu_allocator *a)
117{ 117{
118 return (struct gk20a_lockless_allocator *)(a)->priv; 118 return (struct nvgpu_lockless_allocator *)(a)->priv;
119} 119}
120 120
121#endif 121#endif
diff --git a/drivers/gpu/nvgpu/gk20a/gk20a_allocator.c b/drivers/gpu/nvgpu/common/mm/nvgpu_allocator.c
index 3129b07c..ebd779c0 100644
--- a/drivers/gpu/nvgpu/gk20a/gk20a_allocator.c
+++ b/drivers/gpu/nvgpu/common/mm/nvgpu_allocator.c
@@ -19,14 +19,15 @@
19#include <linux/kernel.h> 19#include <linux/kernel.h>
20#include <linux/slab.h> 20#include <linux/slab.h>
21 21
22#include "gk20a.h" 22#include <nvgpu/allocator.h>
23#include "mm_gk20a.h"
24#include "platform_gk20a.h"
25#include "gk20a_allocator.h"
26 23
27u32 gk20a_alloc_tracing_on; 24#include "gk20a/gk20a.h"
25#include "gk20a/mm_gk20a.h"
26#include "gk20a/platform_gk20a.h"
28 27
29u64 gk20a_alloc_length(struct gk20a_allocator *a) 28u32 nvgpu_alloc_tracing_on;
29
30u64 nvgpu_alloc_length(struct nvgpu_allocator *a)
30{ 31{
31 if (a->ops->length) 32 if (a->ops->length)
32 return a->ops->length(a); 33 return a->ops->length(a);
@@ -34,7 +35,7 @@ u64 gk20a_alloc_length(struct gk20a_allocator *a)
34 return 0; 35 return 0;
35} 36}
36 37
37u64 gk20a_alloc_base(struct gk20a_allocator *a) 38u64 nvgpu_alloc_base(struct nvgpu_allocator *a)
38{ 39{
39 if (a->ops->base) 40 if (a->ops->base)
40 return a->ops->base(a); 41 return a->ops->base(a);
@@ -42,7 +43,7 @@ u64 gk20a_alloc_base(struct gk20a_allocator *a)
42 return 0; 43 return 0;
43} 44}
44 45
45u64 gk20a_alloc_initialized(struct gk20a_allocator *a) 46u64 nvgpu_alloc_initialized(struct nvgpu_allocator *a)
46{ 47{
47 if (!a->ops || !a->ops->inited) 48 if (!a->ops || !a->ops->inited)
48 return 0; 49 return 0;
@@ -50,7 +51,7 @@ u64 gk20a_alloc_initialized(struct gk20a_allocator *a)
50 return a->ops->inited(a); 51 return a->ops->inited(a);
51} 52}
52 53
53u64 gk20a_alloc_end(struct gk20a_allocator *a) 54u64 nvgpu_alloc_end(struct nvgpu_allocator *a)
54{ 55{
55 if (a->ops->end) 56 if (a->ops->end)
56 return a->ops->end(a); 57 return a->ops->end(a);
@@ -58,7 +59,7 @@ u64 gk20a_alloc_end(struct gk20a_allocator *a)
58 return 0; 59 return 0;
59} 60}
60 61
61u64 gk20a_alloc_space(struct gk20a_allocator *a) 62u64 nvgpu_alloc_space(struct nvgpu_allocator *a)
62{ 63{
63 if (a->ops->space) 64 if (a->ops->space)
64 return a->ops->space(a); 65 return a->ops->space(a);
@@ -66,17 +67,17 @@ u64 gk20a_alloc_space(struct gk20a_allocator *a)
66 return 0; 67 return 0;
67} 68}
68 69
69u64 gk20a_alloc(struct gk20a_allocator *a, u64 len) 70u64 nvgpu_alloc(struct nvgpu_allocator *a, u64 len)
70{ 71{
71 return a->ops->alloc(a, len); 72 return a->ops->alloc(a, len);
72} 73}
73 74
74void gk20a_free(struct gk20a_allocator *a, u64 addr) 75void nvgpu_free(struct nvgpu_allocator *a, u64 addr)
75{ 76{
76 a->ops->free(a, addr); 77 a->ops->free(a, addr);
77} 78}
78 79
79u64 gk20a_alloc_fixed(struct gk20a_allocator *a, u64 base, u64 len) 80u64 nvgpu_alloc_fixed(struct nvgpu_allocator *a, u64 base, u64 len)
80{ 81{
81 if (a->ops->alloc_fixed) 82 if (a->ops->alloc_fixed)
82 return a->ops->alloc_fixed(a, base, len); 83 return a->ops->alloc_fixed(a, base, len);
@@ -84,7 +85,7 @@ u64 gk20a_alloc_fixed(struct gk20a_allocator *a, u64 base, u64 len)
84 return 0; 85 return 0;
85} 86}
86 87
87void gk20a_free_fixed(struct gk20a_allocator *a, u64 base, u64 len) 88void nvgpu_free_fixed(struct nvgpu_allocator *a, u64 base, u64 len)
88{ 89{
89 /* 90 /*
90 * If this operation is not defined for the allocator then just do 91 * If this operation is not defined for the allocator then just do
@@ -95,8 +96,8 @@ void gk20a_free_fixed(struct gk20a_allocator *a, u64 base, u64 len)
95 a->ops->free_fixed(a, base, len); 96 a->ops->free_fixed(a, base, len);
96} 97}
97 98
98int gk20a_alloc_reserve_carveout(struct gk20a_allocator *a, 99int nvgpu_alloc_reserve_carveout(struct nvgpu_allocator *a,
99 struct gk20a_alloc_carveout *co) 100 struct nvgpu_alloc_carveout *co)
100{ 101{
101 if (a->ops->reserve_carveout) 102 if (a->ops->reserve_carveout)
102 return a->ops->reserve_carveout(a, co); 103 return a->ops->reserve_carveout(a, co);
@@ -104,25 +105,25 @@ int gk20a_alloc_reserve_carveout(struct gk20a_allocator *a,
104 return -ENODEV; 105 return -ENODEV;
105} 106}
106 107
107void gk20a_alloc_release_carveout(struct gk20a_allocator *a, 108void nvgpu_alloc_release_carveout(struct nvgpu_allocator *a,
108 struct gk20a_alloc_carveout *co) 109 struct nvgpu_alloc_carveout *co)
109{ 110{
110 if (a->ops->release_carveout) 111 if (a->ops->release_carveout)
111 a->ops->release_carveout(a, co); 112 a->ops->release_carveout(a, co);
112} 113}
113 114
114void gk20a_alloc_destroy(struct gk20a_allocator *a) 115void nvgpu_alloc_destroy(struct nvgpu_allocator *a)
115{ 116{
116 a->ops->fini(a); 117 a->ops->fini(a);
117 memset(a, 0, sizeof(*a)); 118 memset(a, 0, sizeof(*a));
118} 119}
119 120
120/* 121/*
121 * Handle the common init stuff for a gk20a_allocator. 122 * Handle the common init stuff for a nvgpu_allocator.
122 */ 123 */
123int __gk20a_alloc_common_init(struct gk20a_allocator *a, 124int __nvgpu_alloc_common_init(struct nvgpu_allocator *a,
124 const char *name, void *priv, bool dbg, 125 const char *name, void *priv, bool dbg,
125 const struct gk20a_allocator_ops *ops) 126 const struct nvgpu_allocator_ops *ops)
126{ 127{
127 if (!ops) 128 if (!ops)
128 return -EINVAL; 129 return -EINVAL;
@@ -145,7 +146,7 @@ int __gk20a_alloc_common_init(struct gk20a_allocator *a,
145 return 0; 146 return 0;
146} 147}
147 148
148void gk20a_alloc_print_stats(struct gk20a_allocator *__a, 149void nvgpu_alloc_print_stats(struct nvgpu_allocator *__a,
149 struct seq_file *s, int lock) 150 struct seq_file *s, int lock)
150{ 151{
151 __a->ops->print_stats(__a, s, lock); 152 __a->ops->print_stats(__a, s, lock);
@@ -154,9 +155,9 @@ void gk20a_alloc_print_stats(struct gk20a_allocator *__a,
154#ifdef CONFIG_DEBUG_FS 155#ifdef CONFIG_DEBUG_FS
155static int __alloc_show(struct seq_file *s, void *unused) 156static int __alloc_show(struct seq_file *s, void *unused)
156{ 157{
157 struct gk20a_allocator *a = s->private; 158 struct nvgpu_allocator *a = s->private;
158 159
159 gk20a_alloc_print_stats(a, s, 1); 160 nvgpu_alloc_print_stats(a, s, 1);
160 161
161 return 0; 162 return 0;
162} 163}
@@ -174,7 +175,7 @@ static const struct file_operations __alloc_fops = {
174}; 175};
175#endif 176#endif
176 177
177void gk20a_init_alloc_debug(struct gk20a *g, struct gk20a_allocator *a) 178void nvgpu_init_alloc_debug(struct gk20a *g, struct nvgpu_allocator *a)
178{ 179{
179#ifdef CONFIG_DEBUG_FS 180#ifdef CONFIG_DEBUG_FS
180 if (!g->debugfs_allocators) 181 if (!g->debugfs_allocators)
@@ -186,7 +187,7 @@ void gk20a_init_alloc_debug(struct gk20a *g, struct gk20a_allocator *a)
186#endif 187#endif
187} 188}
188 189
189void gk20a_fini_alloc_debug(struct gk20a_allocator *a) 190void nvgpu_fini_alloc_debug(struct nvgpu_allocator *a)
190{ 191{
191#ifdef CONFIG_DEBUG_FS 192#ifdef CONFIG_DEBUG_FS
192 if (!IS_ERR_OR_NULL(a->debugfs_entry)) 193 if (!IS_ERR_OR_NULL(a->debugfs_entry))
@@ -194,7 +195,7 @@ void gk20a_fini_alloc_debug(struct gk20a_allocator *a)
194#endif 195#endif
195} 196}
196 197
197void gk20a_alloc_debugfs_init(struct device *dev) 198void nvgpu_alloc_debugfs_init(struct device *dev)
198{ 199{
199#ifdef CONFIG_DEBUG_FS 200#ifdef CONFIG_DEBUG_FS
200 struct gk20a_platform *platform = dev_get_drvdata(dev); 201 struct gk20a_platform *platform = dev_get_drvdata(dev);
@@ -206,6 +207,6 @@ void gk20a_alloc_debugfs_init(struct device *dev)
206 return; 207 return;
207 208
208 debugfs_create_u32("tracing", 0664, g->debugfs_allocators, 209 debugfs_create_u32("tracing", 0664, g->debugfs_allocators,
209 &gk20a_alloc_tracing_on); 210 &nvgpu_alloc_tracing_on);
210#endif 211#endif
211} 212}
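nvgpu_allocator.c is the generic front end: each public nvgpu_alloc_*() call dispatches through the ops table, and missing optional ops simply fall back to a 0 or -ENODEV return. A hedged sketch of how a hypothetical backend would plug into __nvgpu_alloc_common_init(); the backend, its name, and its trivial behaviour are invented for illustration:

#include <linux/slab.h>
#include <nvgpu/allocator.h>

struct dummy_allocator {	/* hypothetical backend state */
	u64 base;
	u64 length;
};

static u64 dummy_alloc(struct nvgpu_allocator *a, u64 len)
{
	struct dummy_allocator *d = a->priv;

	return d->base;	/* always hands out the base address; a toy policy */
}

static void dummy_free(struct nvgpu_allocator *a, u64 addr)
{
	/* nothing to reclaim in this toy backend */
}

static void dummy_fini(struct nvgpu_allocator *a)
{
	kfree(a->priv);
}

static const struct nvgpu_allocator_ops dummy_ops = {
	.alloc = dummy_alloc,
	.free  = dummy_free,
	.fini  = dummy_fini,
	/* .base, .length, etc. left NULL: the front end returns 0 for them. */
};

static int dummy_allocator_init(struct gk20a *g, struct nvgpu_allocator *na,
				const char *name, u64 base, u64 length)
{
	struct dummy_allocator *d = kzalloc(sizeof(*d), GFP_KERNEL);
	int err;

	if (!d)
		return -ENOMEM;

	d->base = base;
	d->length = length;

	err = __nvgpu_alloc_common_init(na, name, d, false, &dummy_ops);
	if (err)
		kfree(d);
	return err;
}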
diff --git a/drivers/gpu/nvgpu/gk20a/gk20a_allocator_page.c b/drivers/gpu/nvgpu/common/mm/page_allocator.c
index 9717a726..c61b2238 100644
--- a/drivers/gpu/nvgpu/gk20a/gk20a_allocator_page.c
+++ b/drivers/gpu/nvgpu/common/mm/page_allocator.c
@@ -19,9 +19,10 @@
19#include <linux/bitops.h> 19#include <linux/bitops.h>
20#include <linux/mm.h> 20#include <linux/mm.h>
21 21
22#include "gk20a_allocator.h" 22#include <nvgpu/allocator.h>
23#include <nvgpu/page_allocator.h>
24
23#include "buddy_allocator_priv.h" 25#include "buddy_allocator_priv.h"
24#include "page_allocator_priv.h"
25 26
26#define palloc_dbg(a, fmt, arg...) \ 27#define palloc_dbg(a, fmt, arg...) \
27 alloc_dbg(palloc_owner(a), fmt, ##arg) 28 alloc_dbg(palloc_owner(a), fmt, ##arg)
@@ -81,59 +82,59 @@ static inline void del_slab_page_from_full(struct page_alloc_slab *slab,
81 page->state = SP_NONE; 82 page->state = SP_NONE;
82} 83}
83 84
84static u64 gk20a_page_alloc_length(struct gk20a_allocator *a) 85static u64 nvgpu_page_alloc_length(struct nvgpu_allocator *a)
85{ 86{
86 struct gk20a_page_allocator *va = a->priv; 87 struct nvgpu_page_allocator *va = a->priv;
87 88
88 return gk20a_alloc_length(&va->source_allocator); 89 return nvgpu_alloc_length(&va->source_allocator);
89} 90}
90 91
91static u64 gk20a_page_alloc_base(struct gk20a_allocator *a) 92static u64 nvgpu_page_alloc_base(struct nvgpu_allocator *a)
92{ 93{
93 struct gk20a_page_allocator *va = a->priv; 94 struct nvgpu_page_allocator *va = a->priv;
94 95
95 return gk20a_alloc_base(&va->source_allocator); 96 return nvgpu_alloc_base(&va->source_allocator);
96} 97}
97 98
98static int gk20a_page_alloc_inited(struct gk20a_allocator *a) 99static int nvgpu_page_alloc_inited(struct nvgpu_allocator *a)
99{ 100{
100 struct gk20a_page_allocator *va = a->priv; 101 struct nvgpu_page_allocator *va = a->priv;
101 102
102 return gk20a_alloc_initialized(&va->source_allocator); 103 return nvgpu_alloc_initialized(&va->source_allocator);
103} 104}
104 105
105static u64 gk20a_page_alloc_end(struct gk20a_allocator *a) 106static u64 nvgpu_page_alloc_end(struct nvgpu_allocator *a)
106{ 107{
107 struct gk20a_page_allocator *va = a->priv; 108 struct nvgpu_page_allocator *va = a->priv;
108 109
109 return gk20a_alloc_end(&va->source_allocator); 110 return nvgpu_alloc_end(&va->source_allocator);
110} 111}
111 112
112static u64 gk20a_page_alloc_space(struct gk20a_allocator *a) 113static u64 nvgpu_page_alloc_space(struct nvgpu_allocator *a)
113{ 114{
114 struct gk20a_page_allocator *va = a->priv; 115 struct nvgpu_page_allocator *va = a->priv;
115 116
116 return gk20a_alloc_space(&va->source_allocator); 117 return nvgpu_alloc_space(&va->source_allocator);
117} 118}
118 119
119static int gk20a_page_reserve_co(struct gk20a_allocator *a, 120static int nvgpu_page_reserve_co(struct nvgpu_allocator *a,
120 struct gk20a_alloc_carveout *co) 121 struct nvgpu_alloc_carveout *co)
121{ 122{
122 struct gk20a_page_allocator *va = a->priv; 123 struct nvgpu_page_allocator *va = a->priv;
123 124
124 return gk20a_alloc_reserve_carveout(&va->source_allocator, co); 125 return nvgpu_alloc_reserve_carveout(&va->source_allocator, co);
125} 126}
126 127
127static void gk20a_page_release_co(struct gk20a_allocator *a, 128static void nvgpu_page_release_co(struct nvgpu_allocator *a,
128 struct gk20a_alloc_carveout *co) 129 struct nvgpu_alloc_carveout *co)
129{ 130{
130 struct gk20a_page_allocator *va = a->priv; 131 struct nvgpu_page_allocator *va = a->priv;
131 132
132 gk20a_alloc_release_carveout(&va->source_allocator, co); 133 nvgpu_alloc_release_carveout(&va->source_allocator, co);
133} 134}
134 135
135static void __gk20a_free_pages(struct gk20a_page_allocator *a, 136static void __nvgpu_free_pages(struct nvgpu_page_allocator *a,
136 struct gk20a_page_alloc *alloc, 137 struct nvgpu_page_alloc *alloc,
137 bool free_buddy_alloc) 138 bool free_buddy_alloc)
138{ 139{
139 struct page_alloc_chunk *chunk; 140 struct page_alloc_chunk *chunk;
@@ -145,22 +146,22 @@ static void __gk20a_free_pages(struct gk20a_page_allocator *a,
145 list_del(&chunk->list_entry); 146 list_del(&chunk->list_entry);
146 147
147 if (free_buddy_alloc) 148 if (free_buddy_alloc)
148 gk20a_free(&a->source_allocator, chunk->base); 149 nvgpu_free(&a->source_allocator, chunk->base);
149 kfree(chunk); 150 kfree(chunk);
150 } 151 }
151 152
152 kfree(alloc); 153 kfree(alloc);
153} 154}
154 155
155static int __insert_page_alloc(struct gk20a_page_allocator *a, 156static int __insert_page_alloc(struct nvgpu_page_allocator *a,
156 struct gk20a_page_alloc *alloc) 157 struct nvgpu_page_alloc *alloc)
157{ 158{
158 struct rb_node **new = &a->allocs.rb_node; 159 struct rb_node **new = &a->allocs.rb_node;
159 struct rb_node *parent = NULL; 160 struct rb_node *parent = NULL;
160 161
161 while (*new) { 162 while (*new) {
162 struct gk20a_page_alloc *tmp = 163 struct nvgpu_page_alloc *tmp =
163 container_of(*new, struct gk20a_page_alloc, 164 container_of(*new, struct nvgpu_page_alloc,
164 tree_entry); 165 tree_entry);
165 166
166 parent = *new; 167 parent = *new;
@@ -180,15 +181,15 @@ static int __insert_page_alloc(struct gk20a_page_allocator *a,
180 return 0; 181 return 0;
181} 182}
182 183
183static struct gk20a_page_alloc *__find_page_alloc( 184static struct nvgpu_page_alloc *__find_page_alloc(
184 struct gk20a_page_allocator *a, 185 struct nvgpu_page_allocator *a,
185 u64 addr) 186 u64 addr)
186{ 187{
187 struct rb_node *node = a->allocs.rb_node; 188 struct rb_node *node = a->allocs.rb_node;
188 struct gk20a_page_alloc *alloc; 189 struct nvgpu_page_alloc *alloc;
189 190
190 while (node) { 191 while (node) {
191 alloc = container_of(node, struct gk20a_page_alloc, tree_entry); 192 alloc = container_of(node, struct nvgpu_page_alloc, tree_entry);
192 193
193 if (addr < alloc->base) 194 if (addr < alloc->base)
194 node = node->rb_left; 195 node = node->rb_left;
@@ -207,7 +208,7 @@ static struct gk20a_page_alloc *__find_page_alloc(
207} 208}
208 209
209static struct page_alloc_slab_page *alloc_slab_page( 210static struct page_alloc_slab_page *alloc_slab_page(
210 struct gk20a_page_allocator *a, 211 struct nvgpu_page_allocator *a,
211 struct page_alloc_slab *slab) 212 struct page_alloc_slab *slab)
212{ 213{
213 struct page_alloc_slab_page *slab_page; 214 struct page_alloc_slab_page *slab_page;
@@ -220,7 +221,7 @@ static struct page_alloc_slab_page *alloc_slab_page(
220 221
221 memset(slab_page, 0, sizeof(*slab_page)); 222 memset(slab_page, 0, sizeof(*slab_page));
222 223
223 slab_page->page_addr = gk20a_alloc(&a->source_allocator, a->page_size); 224 slab_page->page_addr = nvgpu_alloc(&a->source_allocator, a->page_size);
224 if (!slab_page->page_addr) { 225 if (!slab_page->page_addr) {
225 kfree(slab_page); 226 kfree(slab_page);
226 palloc_dbg(a, "OOM: vidmem is full!\n"); 227 palloc_dbg(a, "OOM: vidmem is full!\n");
@@ -242,7 +243,7 @@ static struct page_alloc_slab_page *alloc_slab_page(
242 return slab_page; 243 return slab_page;
243} 244}
244 245
245static void free_slab_page(struct gk20a_page_allocator *a, 246static void free_slab_page(struct nvgpu_page_allocator *a,
246 struct page_alloc_slab_page *slab_page) 247 struct page_alloc_slab_page *slab_page)
247{ 248{
248 palloc_dbg(a, "Freeing slab page @ 0x%012llx\n", slab_page->page_addr); 249 palloc_dbg(a, "Freeing slab page @ 0x%012llx\n", slab_page->page_addr);
@@ -251,7 +252,7 @@ static void free_slab_page(struct gk20a_page_allocator *a,
251 slab_page->nr_objects_alloced != 0 || 252 slab_page->nr_objects_alloced != 0 ||
252 slab_page->bitmap != 0); 253 slab_page->bitmap != 0);
253 254
254 gk20a_free(&a->source_allocator, slab_page->page_addr); 255 nvgpu_free(&a->source_allocator, slab_page->page_addr);
255 a->pages_freed++; 256 a->pages_freed++;
256 257
257 kmem_cache_free(page_alloc_slab_page_cache, slab_page); 258 kmem_cache_free(page_alloc_slab_page_cache, slab_page);
@@ -261,9 +262,9 @@ static void free_slab_page(struct gk20a_page_allocator *a,
261 * This expects @alloc to have 1 empty page_alloc_chunk already added to the 262 * This expects @alloc to have 1 empty page_alloc_chunk already added to the
262 * alloc_chunks list. 263 * alloc_chunks list.
263 */ 264 */
264static int __do_slab_alloc(struct gk20a_page_allocator *a, 265static int __do_slab_alloc(struct nvgpu_page_allocator *a,
265 struct page_alloc_slab *slab, 266 struct page_alloc_slab *slab,
266 struct gk20a_page_alloc *alloc) 267 struct nvgpu_page_alloc *alloc)
267{ 268{
268 struct page_alloc_slab_page *slab_page = NULL; 269 struct page_alloc_slab_page *slab_page = NULL;
269 struct page_alloc_chunk *chunk; 270 struct page_alloc_chunk *chunk;
@@ -317,7 +318,7 @@ static int __do_slab_alloc(struct gk20a_page_allocator *a,
317 BUG(); /* Should be impossible to hit this. */ 318 BUG(); /* Should be impossible to hit this. */
318 319
319 /* 320 /*
320 * Handle building the gk20a_page_alloc struct. We expect one 321 * Handle building the nvgpu_page_alloc struct. We expect one
321 * page_alloc_chunk to be present. 322 * page_alloc_chunk to be present.
322 */ 323 */
323 alloc->slab_page = slab_page; 324 alloc->slab_page = slab_page;
@@ -336,12 +337,12 @@ static int __do_slab_alloc(struct gk20a_page_allocator *a,
336/* 337/*
337 * Allocate from a slab instead of directly from the page allocator. 338 * Allocate from a slab instead of directly from the page allocator.
338 */ 339 */
339static struct gk20a_page_alloc *__gk20a_alloc_slab( 340static struct nvgpu_page_alloc *__nvgpu_alloc_slab(
340 struct gk20a_page_allocator *a, u64 len) 341 struct nvgpu_page_allocator *a, u64 len)
341{ 342{
342 int err, slab_nr; 343 int err, slab_nr;
343 struct page_alloc_slab *slab; 344 struct page_alloc_slab *slab;
344 struct gk20a_page_alloc *alloc = NULL; 345 struct nvgpu_page_alloc *alloc = NULL;
345 struct page_alloc_chunk *chunk = NULL; 346 struct page_alloc_chunk *chunk = NULL;
346 347
347 /* 348 /*
@@ -381,8 +382,8 @@ fail:
381 return NULL; 382 return NULL;
382} 383}
383 384
384static void __gk20a_free_slab(struct gk20a_page_allocator *a, 385static void __nvgpu_free_slab(struct nvgpu_page_allocator *a,
385 struct gk20a_page_alloc *alloc) 386 struct nvgpu_page_alloc *alloc)
386{ 387{
387 struct page_alloc_slab_page *slab_page = alloc->slab_page; 388 struct page_alloc_slab_page *slab_page = alloc->slab_page;
388 struct page_alloc_slab *slab = slab_page->owner; 389 struct page_alloc_slab *slab = slab_page->owner;
@@ -423,7 +424,7 @@ static void __gk20a_free_slab(struct gk20a_page_allocator *a,
423 /* 424 /*
424 * Now handle the page_alloc. 425 * Now handle the page_alloc.
425 */ 426 */
426 __gk20a_free_pages(a, alloc, false); 427 __nvgpu_free_pages(a, alloc, false);
427 a->nr_slab_frees++; 428 a->nr_slab_frees++;
428 429
429 return; 430 return;
@@ -435,10 +436,10 @@ static void __gk20a_free_slab(struct gk20a_page_allocator *a,
435 * fragmentation in the space this allocator will collate smaller non-contiguous 436 * fragmentation in the space this allocator will collate smaller non-contiguous
436 * allocations together if necessary. 437 * allocations together if necessary.
437 */ 438 */
438static struct gk20a_page_alloc *__do_gk20a_alloc_pages( 439static struct nvgpu_page_alloc *__do_nvgpu_alloc_pages(
439 struct gk20a_page_allocator *a, u64 pages) 440 struct nvgpu_page_allocator *a, u64 pages)
440{ 441{
441 struct gk20a_page_alloc *alloc; 442 struct nvgpu_page_alloc *alloc;
442 struct page_alloc_chunk *c; 443 struct page_alloc_chunk *c;
443 u64 max_chunk_len = pages << a->page_shift; 444 u64 max_chunk_len = pages << a->page_shift;
444 int i = 0; 445 int i = 0;
@@ -476,7 +477,7 @@ static struct gk20a_page_alloc *__do_gk20a_alloc_pages(
476 * allocator (i.e the allocator is OOM). 477 * allocator (i.e the allocator is OOM).
477 */ 478 */
478 do { 479 do {
479 chunk_addr = gk20a_alloc(&a->source_allocator, 480 chunk_addr = nvgpu_alloc(&a->source_allocator,
480 chunk_len); 481 chunk_len);
481 482
482 /* Divide by 2 and try again */ 483 /* Divide by 2 and try again */
@@ -497,7 +498,7 @@ static struct gk20a_page_alloc *__do_gk20a_alloc_pages(
497 498
498 c = kmem_cache_alloc(page_alloc_chunk_cache, GFP_KERNEL); 499 c = kmem_cache_alloc(page_alloc_chunk_cache, GFP_KERNEL);
499 if (!c) { 500 if (!c) {
500 gk20a_free(&a->source_allocator, chunk_addr); 501 nvgpu_free(&a->source_allocator, chunk_addr);
501 goto fail_cleanup; 502 goto fail_cleanup;
502 } 503 }
503 504
@@ -522,7 +523,7 @@ fail_cleanup:
522 c = list_first_entry(&alloc->alloc_chunks, 523 c = list_first_entry(&alloc->alloc_chunks,
523 struct page_alloc_chunk, list_entry); 524 struct page_alloc_chunk, list_entry);
524 list_del(&c->list_entry); 525 list_del(&c->list_entry);
525 gk20a_free(&a->source_allocator, c->base); 526 nvgpu_free(&a->source_allocator, c->base);
526 kfree(c); 527 kfree(c);
527 } 528 }
528 kfree(alloc); 529 kfree(alloc);
@@ -530,17 +531,17 @@ fail:
530 return ERR_PTR(-ENOMEM); 531 return ERR_PTR(-ENOMEM);
531} 532}
532 533
533static struct gk20a_page_alloc *__gk20a_alloc_pages( 534static struct nvgpu_page_alloc *__nvgpu_alloc_pages(
534 struct gk20a_page_allocator *a, u64 len) 535 struct nvgpu_page_allocator *a, u64 len)
535{ 536{
536 struct gk20a_page_alloc *alloc = NULL; 537 struct nvgpu_page_alloc *alloc = NULL;
537 struct page_alloc_chunk *c; 538 struct page_alloc_chunk *c;
538 u64 pages; 539 u64 pages;
539 int i = 0; 540 int i = 0;
540 541
541 pages = ALIGN(len, a->page_size) >> a->page_shift; 542 pages = ALIGN(len, a->page_size) >> a->page_shift;
542 543
543 alloc = __do_gk20a_alloc_pages(a, pages); 544 alloc = __do_nvgpu_alloc_pages(a, pages);
544 if (IS_ERR(alloc)) { 545 if (IS_ERR(alloc)) {
545 palloc_dbg(a, "Alloc 0x%llx (%llu) (failed)\n", 546 palloc_dbg(a, "Alloc 0x%llx (%llu) (failed)\n",
546 pages << a->page_shift, pages); 547 pages << a->page_shift, pages);
@@ -561,16 +562,16 @@ static struct gk20a_page_alloc *__gk20a_alloc_pages(
561 * Allocate enough pages to satisfy @len. Page size is determined at 562 * Allocate enough pages to satisfy @len. Page size is determined at
562 * initialization of the allocator. 563 * initialization of the allocator.
563 * 564 *
564 * The return is actually a pointer to a struct gk20a_page_alloc pointer. This 565 * The return is actually a pointer to a struct nvgpu_page_alloc pointer. This
565 * is because it doesn't make a lot of sense to return the address of the first 566 * is because it doesn't make a lot of sense to return the address of the first
566 * page in the list of pages (since they could be discontiguous). This has 567 * page in the list of pages (since they could be discontiguous). This has
567 * precedent in the dma_alloc APIs, though, it's really just an annoying 568 * precedent in the dma_alloc APIs, though, it's really just an annoying
568 * artifact of the fact that the gk20a_alloc() API requires a u64 return type. 569 * artifact of the fact that the nvgpu_alloc() API requires a u64 return type.
569 */ 570 */
570static u64 gk20a_page_alloc(struct gk20a_allocator *__a, u64 len) 571static u64 nvgpu_page_alloc(struct nvgpu_allocator *__a, u64 len)
571{ 572{
572 struct gk20a_page_allocator *a = page_allocator(__a); 573 struct nvgpu_page_allocator *a = page_allocator(__a);
573 struct gk20a_page_alloc *alloc = NULL; 574 struct nvgpu_page_alloc *alloc = NULL;
574 u64 real_len; 575 u64 real_len;
575 576
576 /* 577 /*
@@ -583,9 +584,9 @@ static u64 gk20a_page_alloc(struct gk20a_allocator *__a, u64 len)
583 alloc_lock(__a); 584 alloc_lock(__a);
584 if (a->flags & GPU_ALLOC_4K_VIDMEM_PAGES && 585 if (a->flags & GPU_ALLOC_4K_VIDMEM_PAGES &&
585 real_len <= (a->page_size / 2)) 586 real_len <= (a->page_size / 2))
586 alloc = __gk20a_alloc_slab(a, real_len); 587 alloc = __nvgpu_alloc_slab(a, real_len);
587 else 588 else
588 alloc = __gk20a_alloc_pages(a, real_len); 589 alloc = __nvgpu_alloc_pages(a, real_len);
589 590
590 if (!alloc) { 591 if (!alloc) {
591 alloc_unlock(__a); 592 alloc_unlock(__a);
@@ -606,13 +607,13 @@ static u64 gk20a_page_alloc(struct gk20a_allocator *__a, u64 len)
606} 607}
607 608
608/* 609/*
609 * Note: this will remove the gk20a_page_alloc struct from the RB tree 610 * Note: this will remove the nvgpu_page_alloc struct from the RB tree
610 * if it's found. 611 * if it's found.
611 */ 612 */
612static void gk20a_page_free(struct gk20a_allocator *__a, u64 base) 613static void nvgpu_page_free(struct nvgpu_allocator *__a, u64 base)
613{ 614{
614 struct gk20a_page_allocator *a = page_allocator(__a); 615 struct nvgpu_page_allocator *a = page_allocator(__a);
615 struct gk20a_page_alloc *alloc; 616 struct nvgpu_page_alloc *alloc;
616 617
617 alloc_lock(__a); 618 alloc_lock(__a);
618 619
@@ -620,7 +621,7 @@ static void gk20a_page_free(struct gk20a_allocator *__a, u64 base)
620 alloc = __find_page_alloc(a, base); 621 alloc = __find_page_alloc(a, base);
621 else 622 else
622 alloc = __find_page_alloc(a, 623 alloc = __find_page_alloc(a,
623 ((struct gk20a_page_alloc *)(uintptr_t)base)->base); 624 ((struct nvgpu_page_alloc *)(uintptr_t)base)->base);
624 625
625 if (!alloc) { 626 if (!alloc) {
626 palloc_dbg(a, "Hrm, found no alloc?\n"); 627 palloc_dbg(a, "Hrm, found no alloc?\n");
@@ -636,20 +637,20 @@ static void gk20a_page_free(struct gk20a_allocator *__a, u64 base)
636 * Frees *alloc. 637 * Frees *alloc.
637 */ 638 */
638 if (alloc->slab_page) { 639 if (alloc->slab_page) {
639 __gk20a_free_slab(a, alloc); 640 __nvgpu_free_slab(a, alloc);
640 } else { 641 } else {
641 a->pages_freed += (alloc->length >> a->page_shift); 642 a->pages_freed += (alloc->length >> a->page_shift);
642 __gk20a_free_pages(a, alloc, true); 643 __nvgpu_free_pages(a, alloc, true);
643 } 644 }
644 645
645done: 646done:
646 alloc_unlock(__a); 647 alloc_unlock(__a);
647} 648}
648 649
649static struct gk20a_page_alloc *__gk20a_alloc_pages_fixed( 650static struct nvgpu_page_alloc *__nvgpu_alloc_pages_fixed(
650 struct gk20a_page_allocator *a, u64 base, u64 length) 651 struct nvgpu_page_allocator *a, u64 base, u64 length)
651{ 652{
652 struct gk20a_page_alloc *alloc; 653 struct nvgpu_page_alloc *alloc;
653 struct page_alloc_chunk *c; 654 struct page_alloc_chunk *c;
654 655
655 alloc = kmem_cache_alloc(page_alloc_cache, GFP_KERNEL); 656 alloc = kmem_cache_alloc(page_alloc_cache, GFP_KERNEL);
@@ -657,9 +658,9 @@ static struct gk20a_page_alloc *__gk20a_alloc_pages_fixed(
657 if (!alloc || !c) 658 if (!alloc || !c)
658 goto fail; 659 goto fail;
659 660
660 alloc->base = gk20a_alloc_fixed(&a->source_allocator, base, length); 661 alloc->base = nvgpu_alloc_fixed(&a->source_allocator, base, length);
661 if (!alloc->base) { 662 if (!alloc->base) {
662 WARN(1, "gk20a: failed to fixed alloc pages @ 0x%010llx", base); 663 WARN(1, "nvgpu: failed to fixed alloc pages @ 0x%010llx", base);
663 goto fail; 664 goto fail;
664 } 665 }
665 666
@@ -679,11 +680,11 @@ fail:
679 return ERR_PTR(-ENOMEM); 680 return ERR_PTR(-ENOMEM);
680} 681}
681 682
682static u64 gk20a_page_alloc_fixed(struct gk20a_allocator *__a, 683static u64 nvgpu_page_alloc_fixed(struct nvgpu_allocator *__a,
683 u64 base, u64 len) 684 u64 base, u64 len)
684{ 685{
685 struct gk20a_page_allocator *a = page_allocator(__a); 686 struct nvgpu_page_allocator *a = page_allocator(__a);
686 struct gk20a_page_alloc *alloc = NULL; 687 struct nvgpu_page_alloc *alloc = NULL;
687 struct page_alloc_chunk *c; 688 struct page_alloc_chunk *c;
688 u64 aligned_len, pages; 689 u64 aligned_len, pages;
689 int i = 0; 690 int i = 0;
@@ -693,7 +694,7 @@ static u64 gk20a_page_alloc_fixed(struct gk20a_allocator *__a,
693 694
694 alloc_lock(__a); 695 alloc_lock(__a);
695 696
696 alloc = __gk20a_alloc_pages_fixed(a, base, aligned_len); 697 alloc = __nvgpu_alloc_pages_fixed(a, base, aligned_len);
697 if (IS_ERR(alloc)) { 698 if (IS_ERR(alloc)) {
698 alloc_unlock(__a); 699 alloc_unlock(__a);
699 return 0; 700 return 0;
@@ -718,11 +719,11 @@ static u64 gk20a_page_alloc_fixed(struct gk20a_allocator *__a,
718 return (u64) (uintptr_t) alloc; 719 return (u64) (uintptr_t) alloc;
719} 720}
720 721
721static void gk20a_page_free_fixed(struct gk20a_allocator *__a, 722static void nvgpu_page_free_fixed(struct nvgpu_allocator *__a,
722 u64 base, u64 len) 723 u64 base, u64 len)
723{ 724{
724 struct gk20a_page_allocator *a = page_allocator(__a); 725 struct nvgpu_page_allocator *a = page_allocator(__a);
725 struct gk20a_page_alloc *alloc; 726 struct nvgpu_page_alloc *alloc;
726 727
727 alloc_lock(__a); 728 alloc_lock(__a);
728 729
@@ -731,7 +732,7 @@ static void gk20a_page_free_fixed(struct gk20a_allocator *__a,
731 if (!alloc) 732 if (!alloc)
732 goto done; 733 goto done;
733 } else { 734 } else {
734 alloc = (struct gk20a_page_alloc *) (uintptr_t) base; 735 alloc = (struct nvgpu_page_alloc *) (uintptr_t) base;
735 } 736 }
736 737
737 palloc_dbg(a, "Free [fixed] 0x%010llx + 0x%llx\n", 738 palloc_dbg(a, "Free [fixed] 0x%010llx + 0x%llx\n",
@@ -746,15 +747,15 @@ static void gk20a_page_free_fixed(struct gk20a_allocator *__a,
746 * allocs. This would have to be updated if the underlying 747 * allocs. This would have to be updated if the underlying
747 * allocator were to change. 748 * allocator were to change.
748 */ 749 */
749 __gk20a_free_pages(a, alloc, true); 750 __nvgpu_free_pages(a, alloc, true);
750 751
751done: 752done:
752 alloc_unlock(__a); 753 alloc_unlock(__a);
753} 754}
754 755
755static void gk20a_page_allocator_destroy(struct gk20a_allocator *__a) 756static void nvgpu_page_allocator_destroy(struct nvgpu_allocator *__a)
756{ 757{
757 struct gk20a_page_allocator *a = page_allocator(__a); 758 struct nvgpu_page_allocator *a = page_allocator(__a);
758 759
759 alloc_lock(__a); 760 alloc_lock(__a);
760 kfree(a); 761 kfree(a);
@@ -762,10 +763,10 @@ static void gk20a_page_allocator_destroy(struct gk20a_allocator *__a)
762 alloc_unlock(__a); 763 alloc_unlock(__a);
763} 764}
764 765
765static void gk20a_page_print_stats(struct gk20a_allocator *__a, 766static void nvgpu_page_print_stats(struct nvgpu_allocator *__a,
766 struct seq_file *s, int lock) 767 struct seq_file *s, int lock)
767{ 768{
768 struct gk20a_page_allocator *a = page_allocator(__a); 769 struct nvgpu_page_allocator *a = page_allocator(__a);
769 int i; 770 int i;
770 771
771 if (lock) 772 if (lock)
@@ -803,31 +804,31 @@ static void gk20a_page_print_stats(struct gk20a_allocator *__a,
803 804
804 __alloc_pstat(s, __a, "Source alloc: %s\n", 805 __alloc_pstat(s, __a, "Source alloc: %s\n",
805 a->source_allocator.name); 806 a->source_allocator.name);
806 gk20a_alloc_print_stats(&a->source_allocator, s, lock); 807 nvgpu_alloc_print_stats(&a->source_allocator, s, lock);
807 808
808 if (lock) 809 if (lock)
809 alloc_unlock(__a); 810 alloc_unlock(__a);
810} 811}
811 812
812static const struct gk20a_allocator_ops page_ops = { 813static const struct nvgpu_allocator_ops page_ops = {
813 .alloc = gk20a_page_alloc, 814 .alloc = nvgpu_page_alloc,
814 .free = gk20a_page_free, 815 .free = nvgpu_page_free,
815 816
816 .alloc_fixed = gk20a_page_alloc_fixed, 817 .alloc_fixed = nvgpu_page_alloc_fixed,
817 .free_fixed = gk20a_page_free_fixed, 818 .free_fixed = nvgpu_page_free_fixed,
818 819
819 .reserve_carveout = gk20a_page_reserve_co, 820 .reserve_carveout = nvgpu_page_reserve_co,
820 .release_carveout = gk20a_page_release_co, 821 .release_carveout = nvgpu_page_release_co,
821 822
822 .base = gk20a_page_alloc_base, 823 .base = nvgpu_page_alloc_base,
823 .length = gk20a_page_alloc_length, 824 .length = nvgpu_page_alloc_length,
824 .end = gk20a_page_alloc_end, 825 .end = nvgpu_page_alloc_end,
825 .inited = gk20a_page_alloc_inited, 826 .inited = nvgpu_page_alloc_inited,
826 .space = gk20a_page_alloc_space, 827 .space = nvgpu_page_alloc_space,
827 828
828 .fini = gk20a_page_allocator_destroy, 829 .fini = nvgpu_page_allocator_destroy,
829 830
830 .print_stats = gk20a_page_print_stats, 831 .print_stats = nvgpu_page_print_stats,
831}; 832};
832 833
833/* 834/*
@@ -840,7 +841,7 @@ static const struct gk20a_allocator_ops page_ops = {
840 * 841 *
841 * That gives buckets of 1, 2, 4, and 8 pages (i.e 4k, 8k, 16k, 32k). 842 * That gives buckets of 1, 2, 4, and 8 pages (i.e 4k, 8k, 16k, 32k).
842 */ 843 */
843static int gk20a_page_alloc_init_slabs(struct gk20a_page_allocator *a) 844static int nvgpu_page_alloc_init_slabs(struct nvgpu_page_allocator *a)
844{ 845{
845 size_t nr_slabs = ilog2(a->page_size >> 12); 846 size_t nr_slabs = ilog2(a->page_size >> 12);
846 unsigned int i; 847 unsigned int i;
@@ -867,17 +868,17 @@ static int gk20a_page_alloc_init_slabs(struct gk20a_page_allocator *a)
867 return 0; 868 return 0;
868} 869}
869 870
870int gk20a_page_allocator_init(struct gk20a *g, struct gk20a_allocator *__a, 871int nvgpu_page_allocator_init(struct gk20a *g, struct nvgpu_allocator *__a,
871 const char *name, u64 base, u64 length, 872 const char *name, u64 base, u64 length,
872 u64 blk_size, u64 flags) 873 u64 blk_size, u64 flags)
873{ 874{
874 struct gk20a_page_allocator *a; 875 struct nvgpu_page_allocator *a;
875 char buddy_name[sizeof(__a->name)]; 876 char buddy_name[sizeof(__a->name)];
876 int err; 877 int err;
877 878
878 mutex_lock(&meta_data_cache_lock); 879 mutex_lock(&meta_data_cache_lock);
879 if (!page_alloc_cache) 880 if (!page_alloc_cache)
880 page_alloc_cache = KMEM_CACHE(gk20a_page_alloc, 0); 881 page_alloc_cache = KMEM_CACHE(nvgpu_page_alloc, 0);
881 if (!page_alloc_chunk_cache) 882 if (!page_alloc_chunk_cache)
882 page_alloc_chunk_cache = KMEM_CACHE(page_alloc_chunk, 0); 883 page_alloc_chunk_cache = KMEM_CACHE(page_alloc_chunk, 0);
883 if (!page_alloc_slab_page_cache) 884 if (!page_alloc_slab_page_cache)
@@ -891,11 +892,11 @@ int gk20a_page_allocator_init(struct gk20a *g, struct gk20a_allocator *__a,
891 if (blk_size < SZ_4K) 892 if (blk_size < SZ_4K)
892 return -EINVAL; 893 return -EINVAL;
893 894
894 a = kzalloc(sizeof(struct gk20a_page_allocator), GFP_KERNEL); 895 a = kzalloc(sizeof(struct nvgpu_page_allocator), GFP_KERNEL);
895 if (!a) 896 if (!a)
896 return -ENOMEM; 897 return -ENOMEM;
897 898
898 err = __gk20a_alloc_common_init(__a, name, a, false, &page_ops); 899 err = __nvgpu_alloc_common_init(__a, name, a, false, &page_ops);
899 if (err) 900 if (err)
900 goto fail; 901 goto fail;
901 902
@@ -908,19 +909,19 @@ int gk20a_page_allocator_init(struct gk20a *g, struct gk20a_allocator *__a,
908 a->flags = flags; 909 a->flags = flags;
909 910
910 if (flags & GPU_ALLOC_4K_VIDMEM_PAGES && blk_size > SZ_4K) { 911 if (flags & GPU_ALLOC_4K_VIDMEM_PAGES && blk_size > SZ_4K) {
911 err = gk20a_page_alloc_init_slabs(a); 912 err = nvgpu_page_alloc_init_slabs(a);
912 if (err) 913 if (err)
913 goto fail; 914 goto fail;
914 } 915 }
915 916
916 snprintf(buddy_name, sizeof(buddy_name), "%s-src", name); 917 snprintf(buddy_name, sizeof(buddy_name), "%s-src", name);
917 918
918 err = gk20a_buddy_allocator_init(g, &a->source_allocator, buddy_name, 919 err = nvgpu_buddy_allocator_init(g, &a->source_allocator, buddy_name,
919 base, length, blk_size, 0); 920 base, length, blk_size, 0);
920 if (err) 921 if (err)
921 goto fail; 922 goto fail;
922 923
923 gk20a_init_alloc_debug(g, __a); 924 nvgpu_init_alloc_debug(g, __a);
924 palloc_dbg(a, "New allocator: type page\n"); 925 palloc_dbg(a, "New allocator: type page\n");
925 palloc_dbg(a, " base 0x%llx\n", a->base); 926 palloc_dbg(a, " base 0x%llx\n", a->base);
926 palloc_dbg(a, " size 0x%llx\n", a->length); 927 palloc_dbg(a, " size 0x%llx\n", a->length);
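The page allocator builds on a buddy allocator for its backing store and, as its own comment notes, the u64 it returns is not necessarily a plain address, so callers should treat it as an opaque handle that only goes back into nvgpu_free(). A minimal sketch with an invented vidmem window:

#include <linux/sizes.h>
#include <nvgpu/allocator.h>
#include <nvgpu/page_allocator.h>

static void example_page_allocator(struct gk20a *g)
{
	struct nvgpu_allocator pa;
	u64 handle;

	/* Base, length, block size and flags are assumptions for the sketch. */
	if (nvgpu_page_allocator_init(g, &pa, "example-vidmem",
				      SZ_1M, 64 * SZ_1M, SZ_64K,
				      GPU_ALLOC_4K_VIDMEM_PAGES))
		return;

	handle = nvgpu_alloc(&pa, SZ_256K);	/* opaque handle, 0 on failure */
	if (handle)
		nvgpu_free(&pa, handle);

	nvgpu_alloc_destroy(&pa);
}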
diff --git a/drivers/gpu/nvgpu/gk20a/as_gk20a.c b/drivers/gpu/nvgpu/gk20a/as_gk20a.c
index 0b90090a..07601d42 100644
--- a/drivers/gpu/nvgpu/gk20a/as_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/as_gk20a.c
@@ -279,17 +279,17 @@ static int gk20a_as_ioctl_get_va_regions(
279 279
280 for (i = 0; i < write_entries; ++i) { 280 for (i = 0; i < write_entries; ++i) {
281 struct nvgpu_as_va_region region; 281 struct nvgpu_as_va_region region;
282 struct gk20a_allocator *vma = 282 struct nvgpu_allocator *vma =
283 gk20a_alloc_initialized(&vm->fixed) ? 283 nvgpu_alloc_initialized(&vm->fixed) ?
284 &vm->fixed : &vm->vma[i]; 284 &vm->fixed : &vm->vma[i];
285 285
286 memset(&region, 0, sizeof(struct nvgpu_as_va_region)); 286 memset(&region, 0, sizeof(struct nvgpu_as_va_region));
287 287
288 region.page_size = vm->gmmu_page_sizes[i]; 288 region.page_size = vm->gmmu_page_sizes[i];
289 region.offset = gk20a_alloc_base(vma); 289 region.offset = nvgpu_alloc_base(vma);
290 /* No __aeabi_uldivmod() on some platforms... */ 290 /* No __aeabi_uldivmod() on some platforms... */
291 region.pages = (gk20a_alloc_end(vma) - 291 region.pages = (nvgpu_alloc_end(vma) -
292 gk20a_alloc_base(vma)) >> ilog2(region.page_size); 292 nvgpu_alloc_base(vma)) >> ilog2(region.page_size);
293 293
294 if (copy_to_user(user_region_ptr + i, &region, sizeof(region))) 294 if (copy_to_user(user_region_ptr + i, &region, sizeof(region)))
295 return -EFAULT; 295 return -EFAULT;
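The as_gk20a.c hunk keeps the existing trick of shifting by ilog2(region.page_size) instead of dividing, since the 64-bit division helper (__aeabi_uldivmod) is unavailable on some 32-bit targets; the page size is a power of two, so the shift is exact. Restated on its own, purely for reference:

#include <linux/log2.h>
#include <nvgpu/allocator.h>

/* Number of pages spanned by a VA region, computed without a 64-bit division. */
static inline u64 region_pages(struct nvgpu_allocator *vma, u32 page_size)
{
	return (nvgpu_alloc_end(vma) - nvgpu_alloc_base(vma)) >> ilog2(page_size);
}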
diff --git a/drivers/gpu/nvgpu/gk20a/channel_gk20a.c b/drivers/gpu/nvgpu/gk20a/channel_gk20a.c
index 136c28d0..be01e0e9 100644
--- a/drivers/gpu/nvgpu/gk20a/channel_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/channel_gk20a.c
@@ -976,7 +976,7 @@ static void gk20a_free_channel(struct channel_gk20a *ch, bool force)
976 memset(&ch->ramfc, 0, sizeof(struct mem_desc_sub)); 976 memset(&ch->ramfc, 0, sizeof(struct mem_desc_sub));
977 977
978 gk20a_gmmu_unmap_free(ch_vm, &ch->gpfifo.mem); 978 gk20a_gmmu_unmap_free(ch_vm, &ch->gpfifo.mem);
979 nvgpu_free(ch->gpfifo.pipe); 979 nvgpu_kfree(ch->gpfifo.pipe);
980 memset(&ch->gpfifo, 0, sizeof(struct gpfifo_desc)); 980 memset(&ch->gpfifo, 0, sizeof(struct gpfifo_desc));
981 981
982#if defined(CONFIG_GK20A_CYCLE_STATS) 982#if defined(CONFIG_GK20A_CYCLE_STATS)
@@ -1778,7 +1778,7 @@ int gk20a_alloc_channel_gpfifo(struct channel_gk20a *c,
1778 } 1778 }
1779 1779
1780 if (c->gpfifo.mem.aperture == APERTURE_VIDMEM || g->mm.force_pramin) { 1780 if (c->gpfifo.mem.aperture == APERTURE_VIDMEM || g->mm.force_pramin) {
1781 c->gpfifo.pipe = nvgpu_alloc( 1781 c->gpfifo.pipe = nvgpu_kalloc(
1782 gpfifo_size * sizeof(struct nvgpu_gpfifo), 1782 gpfifo_size * sizeof(struct nvgpu_gpfifo),
1783 false); 1783 false);
1784 if (!c->gpfifo.pipe) { 1784 if (!c->gpfifo.pipe) {
@@ -1850,7 +1850,7 @@ clean_up_sync:
1850 c->sync = NULL; 1850 c->sync = NULL;
1851 } 1851 }
1852clean_up_unmap: 1852clean_up_unmap:
1853 nvgpu_free(c->gpfifo.pipe); 1853 nvgpu_kfree(c->gpfifo.pipe);
1854 gk20a_gmmu_unmap_free(ch_vm, &c->gpfifo.mem); 1854 gk20a_gmmu_unmap_free(ch_vm, &c->gpfifo.mem);
1855clean_up: 1855clean_up:
1856 memset(&c->gpfifo, 0, sizeof(struct gpfifo_desc)); 1856 memset(&c->gpfifo, 0, sizeof(struct gpfifo_desc));
@@ -1980,12 +1980,12 @@ static void trace_write_pushbuffer_range(struct channel_gk20a *c,
1980 if (!g) { 1980 if (!g) {
1981 size = count * sizeof(struct nvgpu_gpfifo); 1981 size = count * sizeof(struct nvgpu_gpfifo);
1982 if (size) { 1982 if (size) {
1983 g = nvgpu_alloc(size, false); 1983 g = nvgpu_kalloc(size, false);
1984 if (!g) 1984 if (!g)
1985 return; 1985 return;
1986 1986
1987 if (copy_from_user(g, user_gpfifo, size)) { 1987 if (copy_from_user(g, user_gpfifo, size)) {
1988 nvgpu_free(g); 1988 nvgpu_kfree(g);
1989 return; 1989 return;
1990 } 1990 }
1991 } 1991 }
@@ -1997,7 +1997,7 @@ static void trace_write_pushbuffer_range(struct channel_gk20a *c,
1997 trace_write_pushbuffer(c, gp); 1997 trace_write_pushbuffer(c, gp);
1998 1998
1999 if (gpfifo_allocated) 1999 if (gpfifo_allocated)
2000 nvgpu_free(g); 2000 nvgpu_kfree(g);
2001} 2001}
2002 2002
2003static void gk20a_channel_timeout_start(struct channel_gk20a *ch, 2003static void gk20a_channel_timeout_start(struct channel_gk20a *ch,
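channel_gk20a.c above moves its temporary buffer allocations over to the nvgpu_kalloc()/nvgpu_kfree() pair, with a boolean second argument on the allocation side. The sketch below is a hypothetical implementation of such a pair (size-thresholded kmalloc/vmalloc, with the boolean assumed to request zeroing); it is an assumption for illustration, not necessarily the driver's actual helper:

#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

/* Hypothetical: pick kmalloc() for small requests, vmalloc() for large ones. */
static inline void *example_kalloc(size_t size, bool clear)
{
	if (size > PAGE_SIZE)
		return clear ? vzalloc(size) : vmalloc(size);

	return clear ? kzalloc(size, GFP_KERNEL) : kmalloc(size, GFP_KERNEL);
}

static inline void example_kfree(void *p)
{
	kvfree(p);	/* safe for both kmalloc()'d and vmalloc()'d pointers */
}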
diff --git a/drivers/gpu/nvgpu/gk20a/channel_gk20a.h b/drivers/gpu/nvgpu/gk20a/channel_gk20a.h
index 0a0d94b7..697d1603 100644
--- a/drivers/gpu/nvgpu/gk20a/channel_gk20a.h
+++ b/drivers/gpu/nvgpu/gk20a/channel_gk20a.h
@@ -143,7 +143,7 @@ struct channel_gk20a {
143 struct list_head ch_entry; /* channel's entry in TSG */ 143 struct list_head ch_entry; /* channel's entry in TSG */
144 144
145 struct channel_gk20a_joblist joblist; 145 struct channel_gk20a_joblist joblist;
146 struct gk20a_allocator fence_allocator; 146 struct nvgpu_allocator fence_allocator;
147 147
148 struct vm_gk20a *vm; 148 struct vm_gk20a *vm;
149 149
diff --git a/drivers/gpu/nvgpu/gk20a/dbg_gpu_gk20a.c b/drivers/gpu/nvgpu/gk20a/dbg_gpu_gk20a.c
index e5529295..ac96036f 100644
--- a/drivers/gpu/nvgpu/gk20a/dbg_gpu_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/dbg_gpu_gk20a.c
@@ -815,7 +815,7 @@ static int nvgpu_dbg_gpu_ioctl_access_fb_memory(struct dbg_session_gk20a *dbg_s,
815 goto fail_dmabuf_put; 815 goto fail_dmabuf_put;
816 } 816 }
817 817
818 buffer = nvgpu_alloc(access_limit_size, true); 818 buffer = nvgpu_kalloc(access_limit_size, true);
819 if (!buffer) { 819 if (!buffer) {
820 err = -ENOMEM; 820 err = -ENOMEM;
821 goto fail_dmabuf_put; 821 goto fail_dmabuf_put;
@@ -861,7 +861,7 @@ static int nvgpu_dbg_gpu_ioctl_access_fb_memory(struct dbg_session_gk20a *dbg_s,
861fail_idle: 861fail_idle:
862 gk20a_idle(g->dev); 862 gk20a_idle(g->dev);
863fail_free_buffer: 863fail_free_buffer:
864 nvgpu_free(buffer); 864 nvgpu_kfree(buffer);
865fail_dmabuf_put: 865fail_dmabuf_put:
866 dma_buf_put(dmabuf); 866 dma_buf_put(dmabuf);
867 867
diff --git a/drivers/gpu/nvgpu/gk20a/debug_gk20a.c b/drivers/gpu/nvgpu/gk20a/debug_gk20a.c
index b84db933..8fa108c2 100644
--- a/drivers/gpu/nvgpu/gk20a/debug_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/debug_gk20a.c
@@ -477,7 +477,7 @@ void gk20a_debug_init(struct device *dev, const char *debugfs_symlink)
477 gk20a_railgating_debugfs_init(g->dev); 477 gk20a_railgating_debugfs_init(g->dev);
478 gk20a_cde_debugfs_init(g->dev); 478 gk20a_cde_debugfs_init(g->dev);
479 gk20a_ce_debugfs_init(g->dev); 479 gk20a_ce_debugfs_init(g->dev);
480 gk20a_alloc_debugfs_init(g->dev); 480 nvgpu_alloc_debugfs_init(g->dev);
481 gk20a_mm_debugfs_init(g->dev); 481 gk20a_mm_debugfs_init(g->dev);
482 gk20a_fifo_debugfs_init(g->dev); 482 gk20a_fifo_debugfs_init(g->dev);
483 gk20a_sched_debugfs_init(g->dev); 483 gk20a_sched_debugfs_init(g->dev);
diff --git a/drivers/gpu/nvgpu/gk20a/fence_gk20a.c b/drivers/gpu/nvgpu/gk20a/fence_gk20a.c
index 323caa8f..b8a1dcbc 100644
--- a/drivers/gpu/nvgpu/gk20a/fence_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/fence_gk20a.c
@@ -49,8 +49,8 @@ static void gk20a_fence_free(struct kref *ref)
49 gk20a_semaphore_put(f->semaphore); 49 gk20a_semaphore_put(f->semaphore);
50 50
51 if (f->allocator) { 51 if (f->allocator) {
52 if (gk20a_alloc_initialized(f->allocator)) 52 if (nvgpu_alloc_initialized(f->allocator))
53 gk20a_free(f->allocator, (size_t)f); 53 nvgpu_free(f->allocator, (size_t)f);
54 } else 54 } else
55 kfree(f); 55 kfree(f);
56} 56}
@@ -129,7 +129,7 @@ int gk20a_alloc_fence_pool(struct channel_gk20a *c, unsigned int count)
129 if (!fence_pool) 129 if (!fence_pool)
130 return -ENOMEM; 130 return -ENOMEM;
131 131
132 err = gk20a_lockless_allocator_init(c->g, &c->fence_allocator, 132 err = nvgpu_lockless_allocator_init(c->g, &c->fence_allocator,
133 "fence_pool", (size_t)fence_pool, size, 133 "fence_pool", (size_t)fence_pool, size,
134 sizeof(struct gk20a_fence), 0); 134 sizeof(struct gk20a_fence), 0);
135 if (err) 135 if (err)
@@ -144,11 +144,11 @@ fail:
144 144
145void gk20a_free_fence_pool(struct channel_gk20a *c) 145void gk20a_free_fence_pool(struct channel_gk20a *c)
146{ 146{
147 if (gk20a_alloc_initialized(&c->fence_allocator)) { 147 if (nvgpu_alloc_initialized(&c->fence_allocator)) {
148 void *base = (void *)(uintptr_t) 148 void *base = (void *)(uintptr_t)
149 gk20a_alloc_base(&c->fence_allocator); 149 nvgpu_alloc_base(&c->fence_allocator);
150 150
151 gk20a_alloc_destroy(&c->fence_allocator); 151 nvgpu_alloc_destroy(&c->fence_allocator);
152 vfree(base); 152 vfree(base);
153 } 153 }
154} 154}
@@ -158,9 +158,9 @@ struct gk20a_fence *gk20a_alloc_fence(struct channel_gk20a *c)
158 struct gk20a_fence *fence = NULL; 158 struct gk20a_fence *fence = NULL;
159 159
160 if (channel_gk20a_is_prealloc_enabled(c)) { 160 if (channel_gk20a_is_prealloc_enabled(c)) {
161 if (gk20a_alloc_initialized(&c->fence_allocator)) { 161 if (nvgpu_alloc_initialized(&c->fence_allocator)) {
162 fence = (struct gk20a_fence *)(uintptr_t) 162 fence = (struct gk20a_fence *)(uintptr_t)
163 gk20a_alloc(&c->fence_allocator, 163 nvgpu_alloc(&c->fence_allocator,
164 sizeof(struct gk20a_fence)); 164 sizeof(struct gk20a_fence));
165 165
166 /* clear the node and reset the allocator pointer */ 166 /* clear the node and reset the allocator pointer */
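For reference, a minimal sketch of the renamed lockless-allocator path used for the pre-allocated fence pool; example_fence_pool_usage(), its parameters, and the local names are hypothetical, while the nvgpu_* calls mirror the fence_gk20a.c hunks above.

static int example_fence_pool_usage(struct channel_gk20a *c,
				    void *fence_pool, size_t size)
{
	struct gk20a_fence *fence;
	int err;

	/* Back the allocator with a pre-allocated pool of fixed-size objects. */
	err = nvgpu_lockless_allocator_init(c->g, &c->fence_allocator,
					    "fence_pool", (size_t)fence_pool,
					    size, sizeof(struct gk20a_fence), 0);
	if (err)
		return err;

	/* Each allocation hands back one struct gk20a_fence slot. */
	fence = (struct gk20a_fence *)(uintptr_t)
		nvgpu_alloc(&c->fence_allocator, sizeof(struct gk20a_fence));
	if (fence)
		nvgpu_free(&c->fence_allocator, (size_t)fence);

	/* Teardown follows the same pattern as gk20a_free_fence_pool(). */
	if (nvgpu_alloc_initialized(&c->fence_allocator)) {
		void *base = (void *)(uintptr_t)
			nvgpu_alloc_base(&c->fence_allocator);

		nvgpu_alloc_destroy(&c->fence_allocator);
		vfree(base);
	}

	return 0;
}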
diff --git a/drivers/gpu/nvgpu/gk20a/fence_gk20a.h b/drivers/gpu/nvgpu/gk20a/fence_gk20a.h
index beba761a..f38fcbe7 100644
--- a/drivers/gpu/nvgpu/gk20a/fence_gk20a.h
+++ b/drivers/gpu/nvgpu/gk20a/fence_gk20a.h
@@ -47,7 +47,7 @@ struct gk20a_fence {
47 u32 syncpt_value; 47 u32 syncpt_value;
48 48
49 /* Valid for fences part of a pre-allocated fence pool */ 49 /* Valid for fences part of a pre-allocated fence pool */
50 struct gk20a_allocator *allocator; 50 struct nvgpu_allocator *allocator;
51}; 51};
52 52
53/* Fences can be created from semaphores or syncpoint (id, value) pairs */ 53/* Fences can be created from semaphores or syncpoint (id, value) pairs */
diff --git a/drivers/gpu/nvgpu/gk20a/gk20a.c b/drivers/gpu/nvgpu/gk20a/gk20a.c
index b1e90bd8..753f031a 100644
--- a/drivers/gpu/nvgpu/gk20a/gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/gk20a.c
@@ -43,6 +43,8 @@
43#include <linux/sched.h> 43#include <linux/sched.h>
44#include <linux/version.h> 44#include <linux/version.h>
45 45
46#include <nvgpu/allocator.h>
47
46#include "gk20a.h" 48#include "gk20a.h"
47#include "nvgpu_common.h" 49#include "nvgpu_common.h"
48#include "debug_gk20a.h" 50#include "debug_gk20a.h"
@@ -60,7 +62,6 @@
60#include "gk20a_scale.h" 62#include "gk20a_scale.h"
61#include "ctxsw_trace_gk20a.h" 63#include "ctxsw_trace_gk20a.h"
62#include "dbg_gpu_gk20a.h" 64#include "dbg_gpu_gk20a.h"
63#include "gk20a_allocator.h"
64#include "hal.h" 65#include "hal.h"
65#include "vgpu/vgpu.h" 66#include "vgpu/vgpu.h"
66#include "pci.h" 67#include "pci.h"
diff --git a/drivers/gpu/nvgpu/gk20a/gr_gk20a.c b/drivers/gpu/nvgpu/gk20a/gr_gk20a.c
index 39562ec1..2ee2dd43 100644
--- a/drivers/gpu/nvgpu/gk20a/gr_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/gr_gk20a.c
@@ -3400,7 +3400,7 @@ static void gk20a_remove_gr_support(struct gr_gk20a *gr)
3400 gr->ctx_vars.local_golden_image = NULL; 3400 gr->ctx_vars.local_golden_image = NULL;
3401 3401
3402 if (gr->ctx_vars.hwpm_ctxsw_buffer_offset_map) 3402 if (gr->ctx_vars.hwpm_ctxsw_buffer_offset_map)
3403 nvgpu_free(gr->ctx_vars.hwpm_ctxsw_buffer_offset_map); 3403 nvgpu_kfree(gr->ctx_vars.hwpm_ctxsw_buffer_offset_map);
3404 gr->ctx_vars.hwpm_ctxsw_buffer_offset_map = NULL; 3404 gr->ctx_vars.hwpm_ctxsw_buffer_offset_map = NULL;
3405 3405
3406 gk20a_comptag_allocator_destroy(&gr->comp_tags); 3406 gk20a_comptag_allocator_destroy(&gr->comp_tags);
@@ -7998,7 +7998,7 @@ static int gr_gk20a_create_hwpm_ctxsw_buffer_offset_map(struct gk20a *g)
7998 hwpm_ctxsw_reg_count_max = hwpm_ctxsw_buffer_size >> 2; 7998 hwpm_ctxsw_reg_count_max = hwpm_ctxsw_buffer_size >> 2;
7999 map_size = hwpm_ctxsw_reg_count_max * sizeof(*map); 7999 map_size = hwpm_ctxsw_reg_count_max * sizeof(*map);
8000 8000
8001 map = nvgpu_alloc(map_size, true); 8001 map = nvgpu_kalloc(map_size, true);
8002 if (!map) 8002 if (!map)
8003 return -ENOMEM; 8003 return -ENOMEM;
8004 8004
@@ -8088,7 +8088,7 @@ static int gr_gk20a_create_hwpm_ctxsw_buffer_offset_map(struct gk20a *g)
8088 return 0; 8088 return 0;
8089cleanup: 8089cleanup:
8090 gk20a_err(dev_from_gk20a(g), "Failed to create HWPM buffer offset map"); 8090 gk20a_err(dev_from_gk20a(g), "Failed to create HWPM buffer offset map");
8091 nvgpu_free(map); 8091 nvgpu_kfree(map);
8092 return -EINVAL; 8092 return -EINVAL;
8093} 8093}
8094 8094
diff --git a/drivers/gpu/nvgpu/gk20a/mm_gk20a.c b/drivers/gpu/nvgpu/gk20a/mm_gk20a.c
index 2e338fef..d594a5a4 100644
--- a/drivers/gpu/nvgpu/gk20a/mm_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/mm_gk20a.c
@@ -31,9 +31,9 @@
31#include <uapi/linux/nvgpu.h> 31#include <uapi/linux/nvgpu.h>
32#include <trace/events/gk20a.h> 32#include <trace/events/gk20a.h>
33 33
34#include <gk20a/page_allocator_priv.h>
35
36#include <nvgpu/timers.h> 34#include <nvgpu/timers.h>
35#include <nvgpu/allocator.h>
36#include <nvgpu/page_allocator.h>
37 37
38#include "gk20a.h" 38#include "gk20a.h"
39#include "mm_gk20a.h" 39#include "mm_gk20a.h"
@@ -74,7 +74,7 @@ is_vidmem_page_alloc(u64 addr)
74 return !!(addr & 1ULL); 74 return !!(addr & 1ULL);
75} 75}
76 76
77static inline struct gk20a_page_alloc * 77static inline struct nvgpu_page_alloc *
78get_vidmem_page_alloc(struct scatterlist *sgl) 78get_vidmem_page_alloc(struct scatterlist *sgl)
79{ 79{
80 u64 addr; 80 u64 addr;
@@ -86,7 +86,7 @@ get_vidmem_page_alloc(struct scatterlist *sgl)
86 else 86 else
87 WARN_ON(1); 87 WARN_ON(1);
88 88
89 return (struct gk20a_page_alloc *)(uintptr_t)addr; 89 return (struct nvgpu_page_alloc *)(uintptr_t)addr;
90} 90}
91 91
92int gk20a_mem_begin(struct gk20a *g, struct mem_desc *mem) 92int gk20a_mem_begin(struct gk20a *g, struct mem_desc *mem)
@@ -176,7 +176,7 @@ typedef void (*pramin_access_batch_fn)(struct gk20a *g, u32 start, u32 words,
176static inline void pramin_access_batched(struct gk20a *g, struct mem_desc *mem, 176static inline void pramin_access_batched(struct gk20a *g, struct mem_desc *mem,
177 u32 offset, u32 size, pramin_access_batch_fn loop, u32 **arg) 177 u32 offset, u32 size, pramin_access_batch_fn loop, u32 **arg)
178{ 178{
179 struct gk20a_page_alloc *alloc = NULL; 179 struct nvgpu_page_alloc *alloc = NULL;
180 struct page_alloc_chunk *chunk = NULL; 180 struct page_alloc_chunk *chunk = NULL;
181 u32 byteoff, start_reg, until_end, n; 181 u32 byteoff, start_reg, until_end, n;
182 182
@@ -797,8 +797,8 @@ void gk20a_remove_vm(struct vm_gk20a *vm, struct mem_desc *inst_block)
797static void gk20a_vidmem_destroy(struct gk20a *g) 797static void gk20a_vidmem_destroy(struct gk20a *g)
798{ 798{
799#if defined(CONFIG_GK20A_VIDMEM) 799#if defined(CONFIG_GK20A_VIDMEM)
800 if (gk20a_alloc_initialized(&g->mm.vidmem.allocator)) 800 if (nvgpu_alloc_initialized(&g->mm.vidmem.allocator))
801 gk20a_alloc_destroy(&g->mm.vidmem.allocator); 801 nvgpu_alloc_destroy(&g->mm.vidmem.allocator);
802#endif 802#endif
803} 803}
804 804
@@ -928,8 +928,8 @@ static int gk20a_init_vidmem(struct mm_gk20a *mm)
928 u64 default_page_size = SZ_64K; 928 u64 default_page_size = SZ_64K;
929 int err; 929 int err;
930 930
931 static struct gk20a_alloc_carveout wpr_co = 931 static struct nvgpu_alloc_carveout wpr_co =
932 GK20A_CARVEOUT("wpr-region", 0, SZ_16M); 932 NVGPU_CARVEOUT("wpr-region", 0, SZ_16M);
933 933
934 if (!size) 934 if (!size)
935 return 0; 935 return 0;
@@ -944,12 +944,12 @@ static int gk20a_init_vidmem(struct mm_gk20a *mm)
944 * initialization requires vidmem but we want to use the CE to zero 944 * initialization requires vidmem but we want to use the CE to zero
945 * out vidmem before allocating it... 945 * out vidmem before allocating it...
946 */ 946 */
947 err = gk20a_page_allocator_init(g, &g->mm.vidmem.bootstrap_allocator, 947 err = nvgpu_page_allocator_init(g, &g->mm.vidmem.bootstrap_allocator,
948 "vidmem-bootstrap", 948 "vidmem-bootstrap",
949 bootstrap_base, bootstrap_size, 949 bootstrap_base, bootstrap_size,
950 SZ_4K, 0); 950 SZ_4K, 0);
951 951
952 err = gk20a_page_allocator_init(g, &g->mm.vidmem.allocator, 952 err = nvgpu_page_allocator_init(g, &g->mm.vidmem.allocator,
953 "vidmem", 953 "vidmem",
954 base, size - base, 954 base, size - base,
955 default_page_size, 955 default_page_size,
@@ -961,7 +961,7 @@ static int gk20a_init_vidmem(struct mm_gk20a *mm)
961 } 961 }
962 962
963 /* Reserve bootstrap region in vidmem allocator */ 963 /* Reserve bootstrap region in vidmem allocator */
964 gk20a_alloc_reserve_carveout(&g->mm.vidmem.allocator, &wpr_co); 964 nvgpu_alloc_reserve_carveout(&g->mm.vidmem.allocator, &wpr_co);
965 965
966 mm->vidmem.base = base; 966 mm->vidmem.base = base;
967 mm->vidmem.size = size - base; 967 mm->vidmem.size = size - base;
@@ -1482,7 +1482,7 @@ int gk20a_vm_get_buffers(struct vm_gk20a *vm,
1482 1482
1483 mutex_lock(&vm->update_gmmu_lock); 1483 mutex_lock(&vm->update_gmmu_lock);
1484 1484
1485 buffer_list = nvgpu_alloc(sizeof(*buffer_list) * 1485 buffer_list = nvgpu_kalloc(sizeof(*buffer_list) *
1486 vm->num_user_mapped_buffers, true); 1486 vm->num_user_mapped_buffers, true);
1487 if (!buffer_list) { 1487 if (!buffer_list) {
1488 mutex_unlock(&vm->update_gmmu_lock); 1488 mutex_unlock(&vm->update_gmmu_lock);
@@ -1567,7 +1567,7 @@ void gk20a_vm_put_buffers(struct vm_gk20a *vm,
1567 gk20a_vm_mapping_batch_finish_locked(vm, &batch); 1567 gk20a_vm_mapping_batch_finish_locked(vm, &batch);
1568 mutex_unlock(&vm->update_gmmu_lock); 1568 mutex_unlock(&vm->update_gmmu_lock);
1569 1569
1570 nvgpu_free(mapped_buffers); 1570 nvgpu_kfree(mapped_buffers);
1571} 1571}
1572 1572
1573static void gk20a_vm_unmap_user(struct vm_gk20a *vm, u64 offset, 1573static void gk20a_vm_unmap_user(struct vm_gk20a *vm, u64 offset,
@@ -1623,7 +1623,7 @@ u64 gk20a_vm_alloc_va(struct vm_gk20a *vm,
1623 enum gmmu_pgsz_gk20a gmmu_pgsz_idx) 1623 enum gmmu_pgsz_gk20a gmmu_pgsz_idx)
1624 1624
1625{ 1625{
1626 struct gk20a_allocator *vma = &vm->vma[gmmu_pgsz_idx]; 1626 struct nvgpu_allocator *vma = &vm->vma[gmmu_pgsz_idx];
1627 u64 offset; 1627 u64 offset;
1628 u64 gmmu_page_size = vm->gmmu_page_sizes[gmmu_pgsz_idx]; 1628 u64 gmmu_page_size = vm->gmmu_page_sizes[gmmu_pgsz_idx];
1629 1629
@@ -1645,7 +1645,7 @@ u64 gk20a_vm_alloc_va(struct vm_gk20a *vm,
1645 gk20a_dbg_info("size=0x%llx @ pgsz=%dKB", size, 1645 gk20a_dbg_info("size=0x%llx @ pgsz=%dKB", size,
1646 vm->gmmu_page_sizes[gmmu_pgsz_idx]>>10); 1646 vm->gmmu_page_sizes[gmmu_pgsz_idx]>>10);
1647 1647
1648 offset = gk20a_alloc(vma, size); 1648 offset = nvgpu_alloc(vma, size);
1649 if (!offset) { 1649 if (!offset) {
1650 gk20a_err(dev_from_vm(vm), 1650 gk20a_err(dev_from_vm(vm),
1651 "%s oom: sz=0x%llx", vma->name, size); 1651 "%s oom: sz=0x%llx", vma->name, size);
@@ -1660,11 +1660,11 @@ int gk20a_vm_free_va(struct vm_gk20a *vm,
1660 u64 offset, u64 size, 1660 u64 offset, u64 size,
1661 enum gmmu_pgsz_gk20a pgsz_idx) 1661 enum gmmu_pgsz_gk20a pgsz_idx)
1662{ 1662{
1663 struct gk20a_allocator *vma = &vm->vma[pgsz_idx]; 1663 struct nvgpu_allocator *vma = &vm->vma[pgsz_idx];
1664 1664
1665 gk20a_dbg_info("%s free addr=0x%llx, size=0x%llx", 1665 gk20a_dbg_info("%s free addr=0x%llx, size=0x%llx",
1666 vma->name, offset, size); 1666 vma->name, offset, size);
1667 gk20a_free(vma, offset); 1667 nvgpu_free(vma, offset);
1668 1668
1669 return 0; 1669 return 0;
1670} 1670}
@@ -2302,15 +2302,15 @@ err_kfree:
2302int gk20a_vidmem_get_space(struct gk20a *g, u64 *space) 2302int gk20a_vidmem_get_space(struct gk20a *g, u64 *space)
2303{ 2303{
2304#if defined(CONFIG_GK20A_VIDMEM) 2304#if defined(CONFIG_GK20A_VIDMEM)
2305 struct gk20a_allocator *allocator = &g->mm.vidmem.allocator; 2305 struct nvgpu_allocator *allocator = &g->mm.vidmem.allocator;
2306 2306
2307 gk20a_dbg_fn(""); 2307 gk20a_dbg_fn("");
2308 2308
2309 if (!gk20a_alloc_initialized(allocator)) 2309 if (!nvgpu_alloc_initialized(allocator))
2310 return -ENOSYS; 2310 return -ENOSYS;
2311 2311
2312 mutex_lock(&g->mm.vidmem.clear_list_mutex); 2312 mutex_lock(&g->mm.vidmem.clear_list_mutex);
2313 *space = gk20a_alloc_space(allocator) + 2313 *space = nvgpu_alloc_space(allocator) +
2314 atomic64_read(&g->mm.vidmem.bytes_pending); 2314 atomic64_read(&g->mm.vidmem.bytes_pending);
2315 mutex_unlock(&g->mm.vidmem.clear_list_mutex); 2315 mutex_unlock(&g->mm.vidmem.clear_list_mutex);
2316 return 0; 2316 return 0;
@@ -2359,7 +2359,7 @@ static u64 gk20a_mm_get_align(struct gk20a *g, struct scatterlist *sgl,
2359 u64 buf_addr; 2359 u64 buf_addr;
2360 2360
2361 if (aperture == APERTURE_VIDMEM) { 2361 if (aperture == APERTURE_VIDMEM) {
2362 struct gk20a_page_alloc *alloc = get_vidmem_page_alloc(sgl); 2362 struct nvgpu_page_alloc *alloc = get_vidmem_page_alloc(sgl);
2363 struct page_alloc_chunk *chunk = NULL; 2363 struct page_alloc_chunk *chunk = NULL;
2364 2364
2365 list_for_each_entry(chunk, &alloc->alloc_chunks, list_entry) { 2365 list_for_each_entry(chunk, &alloc->alloc_chunks, list_entry) {
@@ -3068,7 +3068,7 @@ static int gk20a_gmmu_clear_vidmem_mem(struct gk20a *g, struct mem_desc *mem)
3068{ 3068{
3069 struct gk20a_fence *gk20a_fence_out = NULL; 3069 struct gk20a_fence *gk20a_fence_out = NULL;
3070 struct gk20a_fence *gk20a_last_fence = NULL; 3070 struct gk20a_fence *gk20a_last_fence = NULL;
3071 struct gk20a_page_alloc *alloc = NULL; 3071 struct nvgpu_page_alloc *alloc = NULL;
3072 struct page_alloc_chunk *chunk = NULL; 3072 struct page_alloc_chunk *chunk = NULL;
3073 int err = 0; 3073 int err = 0;
3074 3074
@@ -3134,15 +3134,15 @@ int gk20a_gmmu_alloc_attr_vid(struct gk20a *g, enum dma_attr attr,
3134} 3134}
3135 3135
3136#if defined(CONFIG_GK20A_VIDMEM) 3136#if defined(CONFIG_GK20A_VIDMEM)
3137static u64 __gk20a_gmmu_alloc(struct gk20a_allocator *allocator, dma_addr_t at, 3137static u64 __gk20a_gmmu_alloc(struct nvgpu_allocator *allocator, dma_addr_t at,
3138 size_t size) 3138 size_t size)
3139{ 3139{
3140 u64 addr = 0; 3140 u64 addr = 0;
3141 3141
3142 if (at) 3142 if (at)
3143 addr = gk20a_alloc_fixed(allocator, at, size); 3143 addr = nvgpu_alloc_fixed(allocator, at, size);
3144 else 3144 else
3145 addr = gk20a_alloc(allocator, size); 3145 addr = nvgpu_alloc(allocator, size);
3146 3146
3147 return addr; 3147 return addr;
3148} 3148}
@@ -3154,14 +3154,14 @@ int gk20a_gmmu_alloc_attr_vid_at(struct gk20a *g, enum dma_attr attr,
3154#if defined(CONFIG_GK20A_VIDMEM) 3154#if defined(CONFIG_GK20A_VIDMEM)
3155 u64 addr; 3155 u64 addr;
3156 int err; 3156 int err;
3157 struct gk20a_allocator *vidmem_alloc = g->mm.vidmem.cleared ? 3157 struct nvgpu_allocator *vidmem_alloc = g->mm.vidmem.cleared ?
3158 &g->mm.vidmem.allocator : 3158 &g->mm.vidmem.allocator :
3159 &g->mm.vidmem.bootstrap_allocator; 3159 &g->mm.vidmem.bootstrap_allocator;
3160 int before_pending; 3160 int before_pending;
3161 3161
3162 gk20a_dbg_fn(""); 3162 gk20a_dbg_fn("");
3163 3163
3164 if (!gk20a_alloc_initialized(&g->mm.vidmem.allocator)) 3164 if (!nvgpu_alloc_initialized(&g->mm.vidmem.allocator))
3165 return -ENOSYS; 3165 return -ENOSYS;
3166 3166
3167 /* we don't support dma attributes here, except that kernel mappings 3167 /* we don't support dma attributes here, except that kernel mappings
@@ -3214,7 +3214,7 @@ int gk20a_gmmu_alloc_attr_vid_at(struct gk20a *g, enum dma_attr attr,
3214fail_kfree: 3214fail_kfree:
3215 kfree(mem->sgt); 3215 kfree(mem->sgt);
3216fail_physfree: 3216fail_physfree:
3217 gk20a_free(&g->mm.vidmem.allocator, addr); 3217 nvgpu_free(&g->mm.vidmem.allocator, addr);
3218 return err; 3218 return err;
3219#else 3219#else
3220 return -ENOSYS; 3220 return -ENOSYS;
@@ -3241,7 +3241,7 @@ static void gk20a_gmmu_free_attr_vid(struct gk20a *g, enum dma_attr attr,
3241 } 3241 }
3242 } else { 3242 } else {
3243 gk20a_memset(g, mem, 0, 0, mem->size); 3243 gk20a_memset(g, mem, 0, 0, mem->size);
3244 gk20a_free(mem->allocator, 3244 nvgpu_free(mem->allocator,
3245 (u64)get_vidmem_page_alloc(mem->sgt->sgl)); 3245 (u64)get_vidmem_page_alloc(mem->sgt->sgl));
3246 gk20a_free_sgtable(&mem->sgt); 3246 gk20a_free_sgtable(&mem->sgt);
3247 3247
@@ -3276,7 +3276,7 @@ void gk20a_gmmu_free(struct gk20a *g, struct mem_desc *mem)
3276u64 gk20a_mem_get_base_addr(struct gk20a *g, struct mem_desc *mem, 3276u64 gk20a_mem_get_base_addr(struct gk20a *g, struct mem_desc *mem,
3277 u32 flags) 3277 u32 flags)
3278{ 3278{
3279 struct gk20a_page_alloc *alloc; 3279 struct nvgpu_page_alloc *alloc;
3280 u64 addr; 3280 u64 addr;
3281 3281
3282 if (mem->aperture == APERTURE_VIDMEM) { 3282 if (mem->aperture == APERTURE_VIDMEM) {
@@ -3317,7 +3317,7 @@ static void gk20a_vidmem_clear_mem_worker(struct work_struct *work)
3317 3317
3318 while ((mem = get_pending_mem_desc(mm)) != NULL) { 3318 while ((mem = get_pending_mem_desc(mm)) != NULL) {
3319 gk20a_gmmu_clear_vidmem_mem(g, mem); 3319 gk20a_gmmu_clear_vidmem_mem(g, mem);
3320 gk20a_free(mem->allocator, 3320 nvgpu_free(mem->allocator,
3321 (u64)get_vidmem_page_alloc(mem->sgt->sgl)); 3321 (u64)get_vidmem_page_alloc(mem->sgt->sgl));
3322 gk20a_free_sgtable(&mem->sgt); 3322 gk20a_free_sgtable(&mem->sgt);
3323 3323
@@ -3905,7 +3905,7 @@ static int update_gmmu_ptes_locked(struct vm_gk20a *vm,
3905 u32 page_size = vm->gmmu_page_sizes[pgsz_idx]; 3905 u32 page_size = vm->gmmu_page_sizes[pgsz_idx];
3906 int err; 3906 int err;
3907 struct scatterlist *sgl = NULL; 3907 struct scatterlist *sgl = NULL;
3908 struct gk20a_page_alloc *alloc = NULL; 3908 struct nvgpu_page_alloc *alloc = NULL;
3909 struct page_alloc_chunk *chunk = NULL; 3909 struct page_alloc_chunk *chunk = NULL;
3910 u64 length; 3910 u64 length;
3911 3911
@@ -4251,12 +4251,12 @@ static int gk20a_init_sema_pool(struct vm_gk20a *vm)
4251 * 4251 *
4252 * !!! TODO: cleanup. 4252 * !!! TODO: cleanup.
4253 */ 4253 */
4254 sema_sea->gpu_va = gk20a_alloc_fixed(&vm->vma[gmmu_page_size_kernel], 4254 sema_sea->gpu_va = nvgpu_alloc_fixed(&vm->vma[gmmu_page_size_kernel],
4255 vm->va_limit - 4255 vm->va_limit -
4256 mm->channel.kernel_size, 4256 mm->channel.kernel_size,
4257 512 * PAGE_SIZE); 4257 512 * PAGE_SIZE);
4258 if (!sema_sea->gpu_va) { 4258 if (!sema_sea->gpu_va) {
4259 gk20a_free(&vm->vma[gmmu_page_size_small], sema_sea->gpu_va); 4259 nvgpu_free(&vm->vma[gmmu_page_size_small], sema_sea->gpu_va);
4260 gk20a_vm_put(vm); 4260 gk20a_vm_put(vm);
4261 return -ENOMEM; 4261 return -ENOMEM;
4262 } 4262 }
@@ -4264,7 +4264,7 @@ static int gk20a_init_sema_pool(struct vm_gk20a *vm)
4264 err = gk20a_semaphore_pool_map(vm->sema_pool, vm); 4264 err = gk20a_semaphore_pool_map(vm->sema_pool, vm);
4265 if (err) { 4265 if (err) {
4266 gk20a_semaphore_pool_unmap(vm->sema_pool, vm); 4266 gk20a_semaphore_pool_unmap(vm->sema_pool, vm);
4267 gk20a_free(&vm->vma[gmmu_page_size_small], 4267 nvgpu_free(&vm->vma[gmmu_page_size_small],
4268 vm->sema_pool->gpu_va); 4268 vm->sema_pool->gpu_va);
4269 gk20a_vm_put(vm); 4269 gk20a_vm_put(vm);
4270 } 4270 }
@@ -4387,7 +4387,7 @@ int gk20a_init_vm(struct mm_gk20a *mm,
4387 snprintf(alloc_name, sizeof(alloc_name), 4387 snprintf(alloc_name, sizeof(alloc_name),
4388 "gk20a_%s-fixed", name); 4388 "gk20a_%s-fixed", name);
4389 4389
4390 err = __gk20a_buddy_allocator_init(g, &vm->fixed, 4390 err = __nvgpu_buddy_allocator_init(g, &vm->fixed,
4391 vm, alloc_name, 4391 vm, alloc_name,
4392 small_vma_start, 4392 small_vma_start,
4393 g->separate_fixed_allocs, 4393 g->separate_fixed_allocs,
@@ -4404,7 +4404,7 @@ int gk20a_init_vm(struct mm_gk20a *mm,
4404 if (small_vma_start < small_vma_limit) { 4404 if (small_vma_start < small_vma_limit) {
4405 snprintf(alloc_name, sizeof(alloc_name), "gk20a_%s-%dKB", name, 4405 snprintf(alloc_name, sizeof(alloc_name), "gk20a_%s-%dKB", name,
4406 vm->gmmu_page_sizes[gmmu_page_size_small] >> 10); 4406 vm->gmmu_page_sizes[gmmu_page_size_small] >> 10);
4407 err = __gk20a_buddy_allocator_init( 4407 err = __nvgpu_buddy_allocator_init(
4408 g, 4408 g,
4409 &vm->vma[gmmu_page_size_small], 4409 &vm->vma[gmmu_page_size_small],
4410 vm, alloc_name, 4410 vm, alloc_name,
@@ -4420,7 +4420,7 @@ int gk20a_init_vm(struct mm_gk20a *mm,
4420 if (large_vma_start < large_vma_limit) { 4420 if (large_vma_start < large_vma_limit) {
4421 snprintf(alloc_name, sizeof(alloc_name), "gk20a_%s-%dKB", 4421 snprintf(alloc_name, sizeof(alloc_name), "gk20a_%s-%dKB",
4422 name, vm->gmmu_page_sizes[gmmu_page_size_big] >> 10); 4422 name, vm->gmmu_page_sizes[gmmu_page_size_big] >> 10);
4423 err = __gk20a_buddy_allocator_init( 4423 err = __nvgpu_buddy_allocator_init(
4424 g, 4424 g,
4425 &vm->vma[gmmu_page_size_big], 4425 &vm->vma[gmmu_page_size_big],
4426 vm, alloc_name, 4426 vm, alloc_name,
@@ -4438,7 +4438,7 @@ int gk20a_init_vm(struct mm_gk20a *mm,
4438 /* 4438 /*
4439 * kernel reserved VMA is at the end of the aperture 4439 * kernel reserved VMA is at the end of the aperture
4440 */ 4440 */
4441 err = __gk20a_buddy_allocator_init(g, &vm->vma[gmmu_page_size_kernel], 4441 err = __nvgpu_buddy_allocator_init(g, &vm->vma[gmmu_page_size_kernel],
4442 vm, alloc_name, 4442 vm, alloc_name,
4443 kernel_vma_start, 4443 kernel_vma_start,
4444 kernel_vma_limit - kernel_vma_start, 4444 kernel_vma_limit - kernel_vma_start,
@@ -4469,10 +4469,10 @@ int gk20a_init_vm(struct mm_gk20a *mm,
4469 4469
4470clean_up_big_allocator: 4470clean_up_big_allocator:
4471 if (large_vma_start < large_vma_limit) 4471 if (large_vma_start < large_vma_limit)
4472 gk20a_alloc_destroy(&vm->vma[gmmu_page_size_big]); 4472 nvgpu_alloc_destroy(&vm->vma[gmmu_page_size_big]);
4473clean_up_small_allocator: 4473clean_up_small_allocator:
4474 if (small_vma_start < small_vma_limit) 4474 if (small_vma_start < small_vma_limit)
4475 gk20a_alloc_destroy(&vm->vma[gmmu_page_size_small]); 4475 nvgpu_alloc_destroy(&vm->vma[gmmu_page_size_small]);
4476clean_up_ptes: 4476clean_up_ptes:
4477 free_gmmu_pages(vm, &vm->pdb); 4477 free_gmmu_pages(vm, &vm->pdb);
4478clean_up_pdes: 4478clean_up_pdes:
@@ -4547,7 +4547,7 @@ int gk20a_vm_alloc_space(struct gk20a_as_share *as_share,
4547{ 4547{
4548 int err = -ENOMEM; 4548 int err = -ENOMEM;
4549 int pgsz_idx = gmmu_page_size_small; 4549 int pgsz_idx = gmmu_page_size_small;
4550 struct gk20a_allocator *vma; 4550 struct nvgpu_allocator *vma;
4551 struct vm_gk20a *vm = as_share->vm; 4551 struct vm_gk20a *vm = as_share->vm;
4552 struct gk20a *g = vm->mm->g; 4552 struct gk20a *g = vm->mm->g;
4553 struct vm_reserved_va_node *va_node; 4553 struct vm_reserved_va_node *va_node;
@@ -4579,13 +4579,13 @@ int gk20a_vm_alloc_space(struct gk20a_as_share *as_share,
4579 4579
4580 vma = &vm->vma[pgsz_idx]; 4580 vma = &vm->vma[pgsz_idx];
4581 if (args->flags & NVGPU_AS_ALLOC_SPACE_FLAGS_FIXED_OFFSET) { 4581 if (args->flags & NVGPU_AS_ALLOC_SPACE_FLAGS_FIXED_OFFSET) {
4582 if (gk20a_alloc_initialized(&vm->fixed)) 4582 if (nvgpu_alloc_initialized(&vm->fixed))
4583 vma = &vm->fixed; 4583 vma = &vm->fixed;
4584 vaddr_start = gk20a_alloc_fixed(vma, args->o_a.offset, 4584 vaddr_start = nvgpu_alloc_fixed(vma, args->o_a.offset,
4585 (u64)args->pages * 4585 (u64)args->pages *
4586 (u64)args->page_size); 4586 (u64)args->page_size);
4587 } else { 4587 } else {
4588 vaddr_start = gk20a_alloc(vma, 4588 vaddr_start = nvgpu_alloc(vma,
4589 (u64)args->pages * 4589 (u64)args->pages *
4590 (u64)args->page_size); 4590 (u64)args->page_size);
4591 } 4591 }
@@ -4621,7 +4621,7 @@ int gk20a_vm_alloc_space(struct gk20a_as_share *as_share,
4621 APERTURE_INVALID); 4621 APERTURE_INVALID);
4622 if (!map_offset) { 4622 if (!map_offset) {
4623 mutex_unlock(&vm->update_gmmu_lock); 4623 mutex_unlock(&vm->update_gmmu_lock);
4624 gk20a_free(vma, vaddr_start); 4624 nvgpu_free(vma, vaddr_start);
4625 kfree(va_node); 4625 kfree(va_node);
4626 goto clean_up; 4626 goto clean_up;
4627 } 4627 }
@@ -4644,7 +4644,7 @@ int gk20a_vm_free_space(struct gk20a_as_share *as_share,
4644{ 4644{
4645 int err = -ENOMEM; 4645 int err = -ENOMEM;
4646 int pgsz_idx; 4646 int pgsz_idx;
4647 struct gk20a_allocator *vma; 4647 struct nvgpu_allocator *vma;
4648 struct vm_gk20a *vm = as_share->vm; 4648 struct vm_gk20a *vm = as_share->vm;
4649 struct vm_reserved_va_node *va_node; 4649 struct vm_reserved_va_node *va_node;
4650 struct gk20a *g = gk20a_from_vm(vm); 4650 struct gk20a *g = gk20a_from_vm(vm);
@@ -4656,11 +4656,11 @@ int gk20a_vm_free_space(struct gk20a_as_share *as_share,
4656 pgsz_idx = __nv_gmmu_va_is_big_page_region(vm, args->offset) ? 4656 pgsz_idx = __nv_gmmu_va_is_big_page_region(vm, args->offset) ?
4657 gmmu_page_size_big : gmmu_page_size_small; 4657 gmmu_page_size_big : gmmu_page_size_small;
4658 4658
4659 if (gk20a_alloc_initialized(&vm->fixed)) 4659 if (nvgpu_alloc_initialized(&vm->fixed))
4660 vma = &vm->fixed; 4660 vma = &vm->fixed;
4661 else 4661 else
4662 vma = &vm->vma[pgsz_idx]; 4662 vma = &vm->vma[pgsz_idx];
4663 gk20a_free(vma, args->offset); 4663 nvgpu_free(vma, args->offset);
4664 4664
4665 mutex_lock(&vm->update_gmmu_lock); 4665 mutex_lock(&vm->update_gmmu_lock);
4666 va_node = addr_to_reservation(vm, args->offset); 4666 va_node = addr_to_reservation(vm, args->offset);
@@ -4844,13 +4844,13 @@ int gk20a_vm_unmap_buffer(struct vm_gk20a *vm, u64 offset,
4844 4844
4845void gk20a_deinit_vm(struct vm_gk20a *vm) 4845void gk20a_deinit_vm(struct vm_gk20a *vm)
4846{ 4846{
4847 gk20a_alloc_destroy(&vm->vma[gmmu_page_size_kernel]); 4847 nvgpu_alloc_destroy(&vm->vma[gmmu_page_size_kernel]);
4848 if (gk20a_alloc_initialized(&vm->vma[gmmu_page_size_big])) 4848 if (nvgpu_alloc_initialized(&vm->vma[gmmu_page_size_big]))
4849 gk20a_alloc_destroy(&vm->vma[gmmu_page_size_big]); 4849 nvgpu_alloc_destroy(&vm->vma[gmmu_page_size_big]);
4850 if (gk20a_alloc_initialized(&vm->vma[gmmu_page_size_small])) 4850 if (nvgpu_alloc_initialized(&vm->vma[gmmu_page_size_small]))
4851 gk20a_alloc_destroy(&vm->vma[gmmu_page_size_small]); 4851 nvgpu_alloc_destroy(&vm->vma[gmmu_page_size_small]);
4852 if (gk20a_alloc_initialized(&vm->fixed)) 4852 if (nvgpu_alloc_initialized(&vm->fixed))
4853 gk20a_alloc_destroy(&vm->fixed); 4853 nvgpu_alloc_destroy(&vm->fixed);
4854 4854
4855 gk20a_vm_free_entries(vm, &vm->pdb, 0); 4855 gk20a_vm_free_entries(vm, &vm->pdb, 0);
4856} 4856}
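For reference, a minimal sketch of the renamed vidmem setup path: a page allocator over the aperture plus the WPR carveout reservation. example_vidmem_setup() and its base/size parameters are hypothetical; the nvgpu_* calls mirror the mm_gk20a.c hunks above.

static struct nvgpu_alloc_carveout example_wpr_co =
	NVGPU_CARVEOUT("wpr-region", 0, SZ_16M);

static int example_vidmem_setup(struct gk20a *g, u64 base, u64 size)
{
	int err;

	/* Page allocator managing the vidmem aperture in 64K pages. */
	err = nvgpu_page_allocator_init(g, &g->mm.vidmem.allocator, "vidmem",
					base, size - base, SZ_64K, 0);
	if (err)
		return err;

	/* Keep normal allocations out of the WPR region. */
	nvgpu_alloc_reserve_carveout(&g->mm.vidmem.allocator, &example_wpr_co);

	return 0;
}

static void example_vidmem_teardown(struct gk20a *g)
{
	if (nvgpu_alloc_initialized(&g->mm.vidmem.allocator))
		nvgpu_alloc_destroy(&g->mm.vidmem.allocator);
}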
diff --git a/drivers/gpu/nvgpu/gk20a/mm_gk20a.h b/drivers/gpu/nvgpu/gk20a/mm_gk20a.h
index d32e121a..f58b5df5 100644
--- a/drivers/gpu/nvgpu/gk20a/mm_gk20a.h
+++ b/drivers/gpu/nvgpu/gk20a/mm_gk20a.h
@@ -27,7 +27,8 @@
27#include <linux/version.h> 27#include <linux/version.h>
28#include <asm/dma-iommu.h> 28#include <asm/dma-iommu.h>
29#include <asm/cacheflush.h> 29#include <asm/cacheflush.h>
30#include "gk20a_allocator.h" 30
31#include <nvgpu/allocator.h>
31 32
32#ifdef CONFIG_ARM64 33#ifdef CONFIG_ARM64
33#define outer_flush_range(a, b) 34#define outer_flush_range(a, b)
@@ -70,7 +71,7 @@ struct mem_desc {
70 u64 gpu_va; 71 u64 gpu_va;
71 bool fixed; /* vidmem only */ 72 bool fixed; /* vidmem only */
72 bool user_mem; /* vidmem only */ 73 bool user_mem; /* vidmem only */
73 struct gk20a_allocator *allocator; /* vidmem only */ 74 struct nvgpu_allocator *allocator; /* vidmem only */
74 struct list_head clear_list_entry; /* vidmem only */ 75 struct list_head clear_list_entry; /* vidmem only */
75 bool skip_wmb; 76 bool skip_wmb;
76}; 77};
@@ -295,10 +296,10 @@ struct vm_gk20a {
295 296
296 struct gk20a_mm_entry pdb; 297 struct gk20a_mm_entry pdb;
297 298
298 struct gk20a_allocator vma[gmmu_nr_page_sizes]; 299 struct nvgpu_allocator vma[gmmu_nr_page_sizes];
299 300
300 /* If necessary, split fixed from non-fixed. */ 301 /* If necessary, split fixed from non-fixed. */
301 struct gk20a_allocator fixed; 302 struct nvgpu_allocator fixed;
302 303
303 struct rb_root mapped_buffers; 304 struct rb_root mapped_buffers;
304 305
@@ -421,8 +422,8 @@ struct mm_gk20a {
421 size_t bootstrap_size; 422 size_t bootstrap_size;
422 u64 bootstrap_base; 423 u64 bootstrap_base;
423 424
424 struct gk20a_allocator allocator; 425 struct nvgpu_allocator allocator;
425 struct gk20a_allocator bootstrap_allocator; 426 struct nvgpu_allocator bootstrap_allocator;
426 427
427 u32 ce_ctx_id; 428 u32 ce_ctx_id;
428 volatile bool cleared; 429 volatile bool cleared;
@@ -470,13 +471,13 @@ static inline u64 __nv_gmmu_va_small_page_limit(void)
470 471
471static inline int __nv_gmmu_va_is_big_page_region(struct vm_gk20a *vm, u64 addr) 472static inline int __nv_gmmu_va_is_big_page_region(struct vm_gk20a *vm, u64 addr)
472{ 473{
473 struct gk20a_allocator *a = &vm->vma[gmmu_page_size_big]; 474 struct nvgpu_allocator *a = &vm->vma[gmmu_page_size_big];
474 475
475 if (!vm->big_pages) 476 if (!vm->big_pages)
476 return 0; 477 return 0;
477 478
478 return addr >= gk20a_alloc_base(a) && 479 return addr >= nvgpu_alloc_base(a) &&
479 addr < gk20a_alloc_base(a) + gk20a_alloc_length(a); 480 addr < nvgpu_alloc_base(a) + nvgpu_alloc_length(a);
480} 481}
481 482
482/* 483/*
@@ -825,7 +826,7 @@ void gk20a_remove_vm(struct vm_gk20a *vm, struct mem_desc *inst_block);
825extern const struct gk20a_mmu_level gk20a_mm_levels_64k[]; 826extern const struct gk20a_mmu_level gk20a_mm_levels_64k[];
826extern const struct gk20a_mmu_level gk20a_mm_levels_128k[]; 827extern const struct gk20a_mmu_level gk20a_mm_levels_128k[];
827 828
828static inline void *nvgpu_alloc(size_t size, bool clear) 829static inline void *nvgpu_kalloc(size_t size, bool clear)
829{ 830{
830 void *p; 831 void *p;
831 832
@@ -844,7 +845,7 @@ static inline void *nvgpu_alloc(size_t size, bool clear)
844 return p; 845 return p;
845} 846}
846 847
847static inline void nvgpu_free(void *p) 848static inline void nvgpu_kfree(void *p)
848{ 849{
849 if (virt_addr_valid(p)) 850 if (virt_addr_valid(p))
850 kfree(p); 851 kfree(p);
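For reference, a minimal sketch of the renamed kernel-memory helpers declared above: nvgpu_kalloc() may hand back either kmalloc() or vmalloc() storage depending on the requested size (hence the virt_addr_valid() check in nvgpu_kfree()), so the two must always be paired. example_offset_map() and nr_regs are hypothetical.

static int example_offset_map(size_t nr_regs)
{
	u32 *map;

	/* Second argument requests zero-initialized memory. */
	map = nvgpu_kalloc(nr_regs * sizeof(*map), true);
	if (!map)
		return -ENOMEM;

	/* ... fill in and use the offset map ... */

	nvgpu_kfree(map);

	return 0;
}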
diff --git a/drivers/gpu/nvgpu/gk20a/pmu_gk20a.c b/drivers/gpu/nvgpu/gk20a/pmu_gk20a.c
index e221be11..56ebda1a 100644
--- a/drivers/gpu/nvgpu/gk20a/pmu_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/pmu_gk20a.c
@@ -2896,8 +2896,8 @@ void gk20a_remove_pmu_support(struct pmu_gk20a *pmu)
2896{ 2896{
2897 gk20a_dbg_fn(""); 2897 gk20a_dbg_fn("");
2898 2898
2899 if (gk20a_alloc_initialized(&pmu->dmem)) 2899 if (nvgpu_alloc_initialized(&pmu->dmem))
2900 gk20a_alloc_destroy(&pmu->dmem); 2900 nvgpu_alloc_destroy(&pmu->dmem);
2901 2901
2902 release_firmware(pmu->fw); 2902 release_firmware(pmu->fw);
2903} 2903}
@@ -3607,7 +3607,7 @@ static int pmu_init_perfmon(struct pmu_gk20a *pmu)
3607 gk20a_writel(g, pwr_pmu_idle_ctrl_r(2), data); 3607 gk20a_writel(g, pwr_pmu_idle_ctrl_r(2), data);
3608 3608
3609 if (!pmu->sample_buffer) 3609 if (!pmu->sample_buffer)
3610 pmu->sample_buffer = gk20a_alloc(&pmu->dmem, 3610 pmu->sample_buffer = nvgpu_alloc(&pmu->dmem,
3611 2 * sizeof(u16)); 3611 2 * sizeof(u16));
3612 if (!pmu->sample_buffer) { 3612 if (!pmu->sample_buffer) {
3613 gk20a_err(dev_from_gk20a(g), 3613 gk20a_err(dev_from_gk20a(g),
@@ -3708,7 +3708,7 @@ static int pmu_process_init_msg(struct pmu_gk20a *pmu,
3708 for (i = 0; i < PMU_QUEUE_COUNT; i++) 3708 for (i = 0; i < PMU_QUEUE_COUNT; i++)
3709 pmu_queue_init(pmu, i, init); 3709 pmu_queue_init(pmu, i, init);
3710 3710
3711 if (!gk20a_alloc_initialized(&pmu->dmem)) { 3711 if (!nvgpu_alloc_initialized(&pmu->dmem)) {
3712 /* Align start and end addresses */ 3712 /* Align start and end addresses */
3713 u32 start = ALIGN(pv->get_pmu_init_msg_pmu_sw_mg_off(init), 3713 u32 start = ALIGN(pv->get_pmu_init_msg_pmu_sw_mg_off(init),
3714 PMU_DMEM_ALLOC_ALIGNMENT); 3714 PMU_DMEM_ALLOC_ALIGNMENT);
@@ -3716,9 +3716,9 @@ static int pmu_process_init_msg(struct pmu_gk20a *pmu,
3716 pv->get_pmu_init_msg_pmu_sw_mg_size(init)) & 3716 pv->get_pmu_init_msg_pmu_sw_mg_size(init)) &
3717 ~(PMU_DMEM_ALLOC_ALIGNMENT - 1); 3717 ~(PMU_DMEM_ALLOC_ALIGNMENT - 1);
3718 u32 size = end - start; 3718 u32 size = end - start;
3719 gk20a_bitmap_allocator_init(g, &pmu->dmem, "gk20a_pmu_dmem", 3719 nvgpu_bitmap_allocator_init(g, &pmu->dmem, "gk20a_pmu_dmem",
3720 start, size, 3720 start, size,
3721 PMU_DMEM_ALLOC_ALIGNMENT, 0); 3721 PMU_DMEM_ALLOC_ALIGNMENT, 0);
3722 } 3722 }
3723 3723
3724 pmu->pmu_ready = true; 3724 pmu->pmu_ready = true;
@@ -3855,12 +3855,12 @@ static int pmu_response_handle(struct pmu_gk20a *pmu,
3855 seq->callback = NULL; 3855 seq->callback = NULL;
3856 if (pv->pmu_allocation_get_dmem_size(pmu, 3856 if (pv->pmu_allocation_get_dmem_size(pmu,
3857 pv->get_pmu_seq_in_a_ptr(seq)) != 0) 3857 pv->get_pmu_seq_in_a_ptr(seq)) != 0)
3858 gk20a_free(&pmu->dmem, 3858 nvgpu_free(&pmu->dmem,
3859 pv->pmu_allocation_get_dmem_offset(pmu, 3859 pv->pmu_allocation_get_dmem_offset(pmu,
3860 pv->get_pmu_seq_in_a_ptr(seq))); 3860 pv->get_pmu_seq_in_a_ptr(seq)));
3861 if (pv->pmu_allocation_get_dmem_size(pmu, 3861 if (pv->pmu_allocation_get_dmem_size(pmu,
3862 pv->get_pmu_seq_out_a_ptr(seq)) != 0) 3862 pv->get_pmu_seq_out_a_ptr(seq)) != 0)
3863 gk20a_free(&pmu->dmem, 3863 nvgpu_free(&pmu->dmem,
3864 pv->pmu_allocation_get_dmem_offset(pmu, 3864 pv->pmu_allocation_get_dmem_offset(pmu,
3865 pv->get_pmu_seq_out_a_ptr(seq))); 3865 pv->get_pmu_seq_out_a_ptr(seq)));
3866 3866
@@ -4601,7 +4601,7 @@ int gk20a_pmu_cmd_post(struct gk20a *g, struct pmu_cmd *cmd,
4601 (u16)max(payload->in.size, payload->out.size)); 4601 (u16)max(payload->in.size, payload->out.size));
4602 4602
4603 *(pv->pmu_allocation_get_dmem_offset_addr(pmu, in)) = 4603 *(pv->pmu_allocation_get_dmem_offset_addr(pmu, in)) =
4604 gk20a_alloc(&pmu->dmem, 4604 nvgpu_alloc(&pmu->dmem,
4605 pv->pmu_allocation_get_dmem_size(pmu, in)); 4605 pv->pmu_allocation_get_dmem_size(pmu, in));
4606 if (!*(pv->pmu_allocation_get_dmem_offset_addr(pmu, in))) 4606 if (!*(pv->pmu_allocation_get_dmem_offset_addr(pmu, in)))
4607 goto clean_up; 4607 goto clean_up;
@@ -4644,7 +4644,7 @@ int gk20a_pmu_cmd_post(struct gk20a *g, struct pmu_cmd *cmd,
4644 4644
4645 if (payload->in.buf != payload->out.buf) { 4645 if (payload->in.buf != payload->out.buf) {
4646 *(pv->pmu_allocation_get_dmem_offset_addr(pmu, out)) = 4646 *(pv->pmu_allocation_get_dmem_offset_addr(pmu, out)) =
4647 gk20a_alloc(&pmu->dmem, 4647 nvgpu_alloc(&pmu->dmem,
4648 pv->pmu_allocation_get_dmem_size(pmu, out)); 4648 pv->pmu_allocation_get_dmem_size(pmu, out));
4649 if (!*(pv->pmu_allocation_get_dmem_offset_addr(pmu, 4649 if (!*(pv->pmu_allocation_get_dmem_offset_addr(pmu,
4650 out))) 4650 out)))
@@ -4694,10 +4694,10 @@ int gk20a_pmu_cmd_post(struct gk20a *g, struct pmu_cmd *cmd,
4694clean_up: 4694clean_up:
4695 gk20a_dbg_fn("fail"); 4695 gk20a_dbg_fn("fail");
4696 if (in) 4696 if (in)
4697 gk20a_free(&pmu->dmem, 4697 nvgpu_free(&pmu->dmem,
4698 pv->pmu_allocation_get_dmem_offset(pmu, in)); 4698 pv->pmu_allocation_get_dmem_offset(pmu, in));
4699 if (out) 4699 if (out)
4700 gk20a_free(&pmu->dmem, 4700 nvgpu_free(&pmu->dmem,
4701 pv->pmu_allocation_get_dmem_offset(pmu, out)); 4701 pv->pmu_allocation_get_dmem_offset(pmu, out));
4702 4702
4703 pmu_seq_release(pmu, seq); 4703 pmu_seq_release(pmu, seq);
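For reference, a minimal sketch of the renamed PMU DMEM path: the DMEM window is carved up with the bitmap allocator, and individual offsets are handed out with nvgpu_alloc()/nvgpu_free(). example_pmu_dmem_usage() and its start/size parameters are hypothetical; the calls mirror the pmu_gk20a.c hunks above.

static int example_pmu_dmem_usage(struct gk20a *g, struct pmu_gk20a *pmu,
				  u32 start, u32 size)
{
	u64 offset;
	int err;

	if (!nvgpu_alloc_initialized(&pmu->dmem)) {
		err = nvgpu_bitmap_allocator_init(g, &pmu->dmem,
						  "gk20a_pmu_dmem",
						  start, size,
						  PMU_DMEM_ALLOC_ALIGNMENT, 0);
		if (err)
			return err;
	}

	/* Offsets into DMEM are returned as u64 values; 0 means failure. */
	offset = nvgpu_alloc(&pmu->dmem, 2 * sizeof(u16));
	if (!offset)
		return -ENOMEM;

	/* ... use the DMEM region at `offset` ... */

	nvgpu_free(&pmu->dmem, offset);

	return 0;
}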
diff --git a/drivers/gpu/nvgpu/gk20a/pmu_gk20a.h b/drivers/gpu/nvgpu/gk20a/pmu_gk20a.h
index cf4f3b52..32e2ef54 100644
--- a/drivers/gpu/nvgpu/gk20a/pmu_gk20a.h
+++ b/drivers/gpu/nvgpu/gk20a/pmu_gk20a.h
@@ -709,7 +709,7 @@ struct pmu_gk20a {
709 struct mutex pmu_copy_lock; 709 struct mutex pmu_copy_lock;
710 struct mutex pmu_seq_lock; 710 struct mutex pmu_seq_lock;
711 711
712 struct gk20a_allocator dmem; 712 struct nvgpu_allocator dmem;
713 713
714 u32 *ucode_image; 714 u32 *ucode_image;
715 bool pmu_ready; 715 bool pmu_ready;
diff --git a/drivers/gpu/nvgpu/gk20a/semaphore_gk20a.h b/drivers/gpu/nvgpu/gk20a/semaphore_gk20a.h
index cf724fdb..8e09fcfc 100644
--- a/drivers/gpu/nvgpu/gk20a/semaphore_gk20a.h
+++ b/drivers/gpu/nvgpu/gk20a/semaphore_gk20a.h
@@ -18,10 +18,11 @@
18#include <linux/list.h> 18#include <linux/list.h>
19#include <linux/delay.h> 19#include <linux/delay.h>
20 20
21#include <nvgpu/allocator.h>
22
21#include "gk20a.h" 23#include "gk20a.h"
22#include "mm_gk20a.h" 24#include "mm_gk20a.h"
23#include "channel_gk20a.h" 25#include "channel_gk20a.h"
24#include "gk20a_allocator.h"
25 26
26#define gpu_sema_dbg(fmt, args...) \ 27#define gpu_sema_dbg(fmt, args...) \
27 gk20a_dbg(gpu_dbg_sema, fmt, ##args) 28 gk20a_dbg(gpu_dbg_sema, fmt, ##args)
diff --git a/drivers/gpu/nvgpu/gk20a/gk20a_allocator.h b/drivers/gpu/nvgpu/include/nvgpu/allocator.h
index b12926b3..dee9b562 100644
--- a/drivers/gpu/nvgpu/gk20a/gk20a_allocator.h
+++ b/drivers/gpu/nvgpu/include/nvgpu/allocator.h
@@ -14,8 +14,8 @@
14 * along with this program. If not, see <http://www.gnu.org/licenses/>. 14 * along with this program. If not, see <http://www.gnu.org/licenses/>.
15 */ 15 */
16 16
17#ifndef GK20A_ALLOCATOR_H 17#ifndef NVGPU_ALLOCATOR_H
18#define GK20A_ALLOCATOR_H 18#define NVGPU_ALLOCATOR_H
19 19
20#include <linux/debugfs.h> 20#include <linux/debugfs.h>
21#include <linux/seq_file.h> 21#include <linux/seq_file.h>
@@ -23,17 +23,17 @@
23 23
24/* #define ALLOCATOR_DEBUG */ 24/* #define ALLOCATOR_DEBUG */
25 25
26struct gk20a_allocator; 26struct nvgpu_allocator;
27struct gk20a_alloc_carveout; 27struct nvgpu_alloc_carveout;
28struct vm_gk20a; 28struct vm_gk20a;
29struct gk20a; 29struct gk20a;
30 30
31/* 31/*
32 * Operations for an allocator to implement. 32 * Operations for an allocator to implement.
33 */ 33 */
34struct gk20a_allocator_ops { 34struct nvgpu_allocator_ops {
35 u64 (*alloc)(struct gk20a_allocator *allocator, u64 len); 35 u64 (*alloc)(struct nvgpu_allocator *allocator, u64 len);
36 void (*free)(struct gk20a_allocator *allocator, u64 addr); 36 void (*free)(struct nvgpu_allocator *allocator, u64 addr);
37 37
38 /* 38 /*
39 * Special interface to allocate a memory region with a specific 39 * Special interface to allocate a memory region with a specific
@@ -42,53 +42,53 @@ struct gk20a_allocator_ops {
42 * be implemented. This behavior exists for legacy reasons and should 42 * be implemented. This behavior exists for legacy reasons and should
43 * not be propagated to new allocators. 43 * not be propagated to new allocators.
44 */ 44 */
45 u64 (*alloc_fixed)(struct gk20a_allocator *allocator, 45 u64 (*alloc_fixed)(struct nvgpu_allocator *allocator,
46 u64 base, u64 len); 46 u64 base, u64 len);
47 void (*free_fixed)(struct gk20a_allocator *allocator, 47 void (*free_fixed)(struct nvgpu_allocator *allocator,
48 u64 base, u64 len); 48 u64 base, u64 len);
49 49
50 /* 50 /*
51 * Allow allocators to reserve space for carveouts. 51 * Allow allocators to reserve space for carveouts.
52 */ 52 */
53 int (*reserve_carveout)(struct gk20a_allocator *allocator, 53 int (*reserve_carveout)(struct nvgpu_allocator *allocator,
54 struct gk20a_alloc_carveout *co); 54 struct nvgpu_alloc_carveout *co);
55 void (*release_carveout)(struct gk20a_allocator *allocator, 55 void (*release_carveout)(struct nvgpu_allocator *allocator,
56 struct gk20a_alloc_carveout *co); 56 struct nvgpu_alloc_carveout *co);
57 57
58 /* 58 /*
59 * Returns info about the allocator. 59 * Returns info about the allocator.
60 */ 60 */
61 u64 (*base)(struct gk20a_allocator *allocator); 61 u64 (*base)(struct nvgpu_allocator *allocator);
62 u64 (*length)(struct gk20a_allocator *allocator); 62 u64 (*length)(struct nvgpu_allocator *allocator);
63 u64 (*end)(struct gk20a_allocator *allocator); 63 u64 (*end)(struct nvgpu_allocator *allocator);
64 int (*inited)(struct gk20a_allocator *allocator); 64 int (*inited)(struct nvgpu_allocator *allocator);
65 u64 (*space)(struct gk20a_allocator *allocator); 65 u64 (*space)(struct nvgpu_allocator *allocator);
66 66
67 /* Destructor. */ 67 /* Destructor. */
68 void (*fini)(struct gk20a_allocator *allocator); 68 void (*fini)(struct nvgpu_allocator *allocator);
69 69
70 /* Debugging. */ 70 /* Debugging. */
71 void (*print_stats)(struct gk20a_allocator *allocator, 71 void (*print_stats)(struct nvgpu_allocator *allocator,
72 struct seq_file *s, int lock); 72 struct seq_file *s, int lock);
73}; 73};
74 74
75struct gk20a_allocator { 75struct nvgpu_allocator {
76 char name[32]; 76 char name[32];
77 struct mutex lock; 77 struct mutex lock;
78 78
79 void *priv; 79 void *priv;
80 const struct gk20a_allocator_ops *ops; 80 const struct nvgpu_allocator_ops *ops;
81 81
82 struct dentry *debugfs_entry; 82 struct dentry *debugfs_entry;
83 bool debug; /* Control for debug msgs. */ 83 bool debug; /* Control for debug msgs. */
84}; 84};
85 85
86struct gk20a_alloc_carveout { 86struct nvgpu_alloc_carveout {
87 const char *name; 87 const char *name;
88 u64 base; 88 u64 base;
89 u64 length; 89 u64 length;
90 90
91 struct gk20a_allocator *allocator; 91 struct nvgpu_allocator *allocator;
92 92
93 /* 93 /*
94 * For usage by the allocator implementation. 94 * For usage by the allocator implementation.
@@ -96,7 +96,7 @@ struct gk20a_alloc_carveout {
96 struct list_head co_entry; 96 struct list_head co_entry;
97}; 97};
98 98
99#define GK20A_CARVEOUT(__name, __base, __length) \ 99#define NVGPU_CARVEOUT(__name, __base, __length) \
100 { \ 100 { \
101 .name = (__name), \ 101 .name = (__name), \
102 .base = (__base), \ 102 .base = (__base), \
@@ -161,12 +161,12 @@ struct gk20a_alloc_carveout {
161#define GPU_ALLOC_FORCE_CONTIG 0x8 161#define GPU_ALLOC_FORCE_CONTIG 0x8
162#define GPU_ALLOC_NO_SCATTER_GATHER 0x10 162#define GPU_ALLOC_NO_SCATTER_GATHER 0x10
163 163
164static inline void alloc_lock(struct gk20a_allocator *a) 164static inline void alloc_lock(struct nvgpu_allocator *a)
165{ 165{
166 mutex_lock(&a->lock); 166 mutex_lock(&a->lock);
167} 167}
168 168
169static inline void alloc_unlock(struct gk20a_allocator *a) 169static inline void alloc_unlock(struct nvgpu_allocator *a)
170{ 170{
171 mutex_unlock(&a->lock); 171 mutex_unlock(&a->lock);
172} 172}
@@ -174,25 +174,25 @@ static inline void alloc_unlock(struct gk20a_allocator *a)
174/* 174/*
175 * Buddy allocator specific initializers. 175 * Buddy allocator specific initializers.
176 */ 176 */
177int __gk20a_buddy_allocator_init(struct gk20a *g, struct gk20a_allocator *a, 177int __nvgpu_buddy_allocator_init(struct gk20a *g, struct nvgpu_allocator *a,
178 struct vm_gk20a *vm, const char *name, 178 struct vm_gk20a *vm, const char *name,
179 u64 base, u64 size, u64 blk_size, 179 u64 base, u64 size, u64 blk_size,
180 u64 max_order, u64 flags); 180 u64 max_order, u64 flags);
181int gk20a_buddy_allocator_init(struct gk20a *g, struct gk20a_allocator *a, 181int nvgpu_buddy_allocator_init(struct gk20a *g, struct nvgpu_allocator *a,
182 const char *name, u64 base, u64 size, 182 const char *name, u64 base, u64 size,
183 u64 blk_size, u64 flags); 183 u64 blk_size, u64 flags);
184 184
185/* 185/*
186 * Bitmap initializers. 186 * Bitmap initializers.
187 */ 187 */
188int gk20a_bitmap_allocator_init(struct gk20a *g, struct gk20a_allocator *a, 188int nvgpu_bitmap_allocator_init(struct gk20a *g, struct nvgpu_allocator *a,
189 const char *name, u64 base, u64 length, 189 const char *name, u64 base, u64 length,
190 u64 blk_size, u64 flags); 190 u64 blk_size, u64 flags);
191 191
192/* 192/*
193 * Page allocator initializers. 193 * Page allocator initializers.
194 */ 194 */
195int gk20a_page_allocator_init(struct gk20a *g, struct gk20a_allocator *a, 195int nvgpu_page_allocator_init(struct gk20a *g, struct nvgpu_allocator *a,
196 const char *name, u64 base, u64 length, 196 const char *name, u64 base, u64 length,
197 u64 blk_size, u64 flags); 197 u64 blk_size, u64 flags);
198 198
@@ -201,7 +201,7 @@ int gk20a_page_allocator_init(struct gk20a *g, struct gk20a_allocator *a,
201 * Note: This allocator can only allocate fixed-size structures of a 201 * Note: This allocator can only allocate fixed-size structures of a
202 * pre-defined size. 202 * pre-defined size.
203 */ 203 */
204int gk20a_lockless_allocator_init(struct gk20a *g, struct gk20a_allocator *a, 204int nvgpu_lockless_allocator_init(struct gk20a *g, struct nvgpu_allocator *a,
205 const char *name, u64 base, u64 length, 205 const char *name, u64 base, u64 length,
206 u64 struct_size, u64 flags); 206 u64 struct_size, u64 flags);
207 207
@@ -210,44 +210,44 @@ int gk20a_lockless_allocator_init(struct gk20a *g, struct gk20a_allocator *a,
210/* 210/*
211 * Allocator APIs. 211 * Allocator APIs.
212 */ 212 */
213u64 gk20a_alloc(struct gk20a_allocator *allocator, u64 len); 213u64 nvgpu_alloc(struct nvgpu_allocator *allocator, u64 len);
214void gk20a_free(struct gk20a_allocator *allocator, u64 addr); 214void nvgpu_free(struct nvgpu_allocator *allocator, u64 addr);
215 215
216u64 gk20a_alloc_fixed(struct gk20a_allocator *allocator, u64 base, u64 len); 216u64 nvgpu_alloc_fixed(struct nvgpu_allocator *allocator, u64 base, u64 len);
217void gk20a_free_fixed(struct gk20a_allocator *allocator, u64 base, u64 len); 217void nvgpu_free_fixed(struct nvgpu_allocator *allocator, u64 base, u64 len);
218 218
219int gk20a_alloc_reserve_carveout(struct gk20a_allocator *a, 219int nvgpu_alloc_reserve_carveout(struct nvgpu_allocator *a,
220 struct gk20a_alloc_carveout *co); 220 struct nvgpu_alloc_carveout *co);
221void gk20a_alloc_release_carveout(struct gk20a_allocator *a, 221void nvgpu_alloc_release_carveout(struct nvgpu_allocator *a,
222 struct gk20a_alloc_carveout *co); 222 struct nvgpu_alloc_carveout *co);
223 223
224u64 gk20a_alloc_base(struct gk20a_allocator *a); 224u64 nvgpu_alloc_base(struct nvgpu_allocator *a);
225u64 gk20a_alloc_length(struct gk20a_allocator *a); 225u64 nvgpu_alloc_length(struct nvgpu_allocator *a);
226u64 gk20a_alloc_end(struct gk20a_allocator *a); 226u64 nvgpu_alloc_end(struct nvgpu_allocator *a);
227u64 gk20a_alloc_initialized(struct gk20a_allocator *a); 227u64 nvgpu_alloc_initialized(struct nvgpu_allocator *a);
228u64 gk20a_alloc_space(struct gk20a_allocator *a); 228u64 nvgpu_alloc_space(struct nvgpu_allocator *a);
229 229
230void gk20a_alloc_destroy(struct gk20a_allocator *allocator); 230void nvgpu_alloc_destroy(struct nvgpu_allocator *allocator);
231 231
232void gk20a_alloc_print_stats(struct gk20a_allocator *a, 232void nvgpu_alloc_print_stats(struct nvgpu_allocator *a,
233 struct seq_file *s, int lock); 233 struct seq_file *s, int lock);
234 234
235/* 235/*
236 * Common functionality for the internals of the allocators. 236 * Common functionality for the internals of the allocators.
237 */ 237 */
238void gk20a_init_alloc_debug(struct gk20a *g, struct gk20a_allocator *a); 238void nvgpu_init_alloc_debug(struct gk20a *g, struct nvgpu_allocator *a);
239void gk20a_fini_alloc_debug(struct gk20a_allocator *a); 239void nvgpu_fini_alloc_debug(struct nvgpu_allocator *a);
240 240
241int __gk20a_alloc_common_init(struct gk20a_allocator *a, 241int __nvgpu_alloc_common_init(struct nvgpu_allocator *a,
242 const char *name, void *priv, bool dbg, 242 const char *name, void *priv, bool dbg,
243 const struct gk20a_allocator_ops *ops); 243 const struct nvgpu_allocator_ops *ops);
244 244
245static inline void gk20a_alloc_enable_dbg(struct gk20a_allocator *a) 245static inline void nvgpu_alloc_enable_dbg(struct nvgpu_allocator *a)
246{ 246{
247 a->debug = true; 247 a->debug = true;
248} 248}
249 249
250static inline void gk20a_alloc_disable_dbg(struct gk20a_allocator *a) 250static inline void nvgpu_alloc_disable_dbg(struct nvgpu_allocator *a)
251{ 251{
252 a->debug = false; 252 a->debug = false;
253} 253}
@@ -255,19 +255,19 @@ static inline void gk20a_alloc_disable_dbg(struct gk20a_allocator *a)
255/* 255/*
256 * Debug stuff. 256 * Debug stuff.
257 */ 257 */
258extern u32 gk20a_alloc_tracing_on; 258extern u32 nvgpu_alloc_tracing_on;
259 259
260void gk20a_alloc_debugfs_init(struct device *dev); 260void nvgpu_alloc_debugfs_init(struct device *dev);
261 261
262#define gk20a_alloc_trace_func() \ 262#define nvgpu_alloc_trace_func() \
263 do { \ 263 do { \
264 if (gk20a_alloc_tracing_on) \ 264 if (nvgpu_alloc_tracing_on) \
265 trace_printk("%s\n", __func__); \ 265 trace_printk("%s\n", __func__); \
266 } while (0) 266 } while (0)
267 267
268#define gk20a_alloc_trace_func_done() \ 268#define nvgpu_alloc_trace_func_done() \
269 do { \ 269 do { \
270 if (gk20a_alloc_tracing_on) \ 270 if (nvgpu_alloc_tracing_on) \
271 trace_printk("%s_done\n", __func__); \ 271 trace_printk("%s_done\n", __func__); \
272 } while (0) 272 } while (0)
273 273
@@ -299,4 +299,4 @@ void gk20a_alloc_debugfs_init(struct device *dev);
299 299
300#endif 300#endif
301 301
302#endif /* GK20A_ALLOCATOR_H */ 302#endif /* NVGPU_ALLOCATOR_H */
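For reference, a minimal sketch of the renamed generic allocator API declared in the new <nvgpu/allocator.h>: initialize a backend (here the buddy allocator), allocate and free, optionally pin a fixed address, and destroy. example_buddy_usage() and its address range are hypothetical.

static int example_buddy_usage(struct gk20a *g)
{
	struct nvgpu_allocator a;
	u64 addr;
	int err;

	/* Buddy allocator over a hypothetical 1 MB range in 4K blocks. */
	err = nvgpu_buddy_allocator_init(g, &a, "example", SZ_1M, SZ_1M,
					 SZ_4K, 0);
	if (err)
		return err;

	addr = nvgpu_alloc(&a, SZ_64K);		/* returns 0 on failure */
	if (addr)
		nvgpu_free(&a, addr);

	/* Backends that implement alloc_fixed() honor a caller-chosen base. */
	addr = nvgpu_alloc_fixed(&a, SZ_1M + SZ_64K, SZ_4K);
	if (addr)
		nvgpu_free_fixed(&a, addr, SZ_4K);

	nvgpu_alloc_destroy(&a);

	return 0;
}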
diff --git a/drivers/gpu/nvgpu/gk20a/page_allocator_priv.h b/drivers/gpu/nvgpu/include/nvgpu/page_allocator.h
index 7d7f43c2..7c21c117 100644
--- a/drivers/gpu/nvgpu/gk20a/page_allocator_priv.h
+++ b/drivers/gpu/nvgpu/include/nvgpu/page_allocator.h
@@ -20,9 +20,9 @@
20#include <linux/list.h> 20#include <linux/list.h>
21#include <linux/rbtree.h> 21#include <linux/rbtree.h>
22 22
23#include "gk20a_allocator.h" 23#include <nvgpu/allocator.h>
24 24
25struct gk20a_allocator; 25struct nvgpu_allocator;
26 26
27/* 27/*
28 * This allocator implements the ability to do SLAB style allocation since the 28 * This allocator implements the ability to do SLAB style allocation since the
@@ -88,7 +88,7 @@ struct page_alloc_chunk {
88 * of the chunks of pages that make up the overall allocation - much like a 88 * of the chunks of pages that make up the overall allocation - much like a
89 * scatter gather table. 89 * scatter gather table.
90 */ 90 */
91struct gk20a_page_alloc { 91struct nvgpu_page_alloc {
92 struct list_head alloc_chunks; 92 struct list_head alloc_chunks;
93 93
94 int nr_chunks; 94 int nr_chunks;
@@ -111,15 +111,15 @@ struct gk20a_page_alloc {
111 struct page_alloc_slab_page *slab_page; 111 struct page_alloc_slab_page *slab_page;
112}; 112};
113 113
114struct gk20a_page_allocator { 114struct nvgpu_page_allocator {
115 struct gk20a_allocator *owner; /* Owner of this allocator. */ 115 struct nvgpu_allocator *owner; /* Owner of this allocator. */
116 116
117 /* 117 /*
118 * Use a buddy allocator to manage the allocation of the underlying 118 * Use a buddy allocator to manage the allocation of the underlying
119 * pages. This lets us abstract the discontiguous allocation handling 119 * pages. This lets us abstract the discontiguous allocation handling
120 * out of the annoyingly complicated buddy allocator. 120 * out of the annoyingly complicated buddy allocator.
121 */ 121 */
122 struct gk20a_allocator source_allocator; 122 struct nvgpu_allocator source_allocator;
123 123
124 /* 124 /*
125 * Page params. 125 * Page params.
@@ -149,14 +149,14 @@ struct gk20a_page_allocator {
149 u64 pages_freed; 149 u64 pages_freed;
150}; 150};
151 151
152static inline struct gk20a_page_allocator *page_allocator( 152static inline struct nvgpu_page_allocator *page_allocator(
153 struct gk20a_allocator *a) 153 struct nvgpu_allocator *a)
154{ 154{
155 return (struct gk20a_page_allocator *)(a)->priv; 155 return (struct nvgpu_page_allocator *)(a)->priv;
156} 156}
157 157
158static inline struct gk20a_allocator *palloc_owner( 158static inline struct nvgpu_allocator *palloc_owner(
159 struct gk20a_page_allocator *a) 159 struct nvgpu_page_allocator *a)
160{ 160{
161 return a->owner; 161 return a->owner;
162} 162}
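For reference, a minimal sketch of how a page-allocator backend recovers its private state through the renamed accessors above; example_space() is a hypothetical op implementation, while page_allocator(), palloc_owner(), source_allocator and nvgpu_alloc_space() all appear in the headers above.

static u64 example_space(struct nvgpu_allocator *na)
{
	struct nvgpu_page_allocator *a = page_allocator(na);

	/* The generic handle that owns this page allocator. */
	WARN_ON(palloc_owner(a) != na);

	/* Remaining space is whatever the backing buddy allocator reports. */
	return nvgpu_alloc_space(&a->source_allocator);
}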
diff --git a/drivers/gpu/nvgpu/vgpu/mm_vgpu.c b/drivers/gpu/nvgpu/vgpu/mm_vgpu.c
index 69f6fcaf..66c9344b 100644
--- a/drivers/gpu/nvgpu/vgpu/mm_vgpu.c
+++ b/drivers/gpu/nvgpu/vgpu/mm_vgpu.c
@@ -227,11 +227,11 @@ static void vgpu_vm_remove_support(struct vm_gk20a *vm)
227 err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg)); 227 err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
228 WARN_ON(err || msg.ret); 228 WARN_ON(err || msg.ret);
229 229
230 gk20a_alloc_destroy(&vm->vma[gmmu_page_size_kernel]); 230 nvgpu_alloc_destroy(&vm->vma[gmmu_page_size_kernel]);
231 if (gk20a_alloc_initialized(&vm->vma[gmmu_page_size_small])) 231 if (nvgpu_alloc_initialized(&vm->vma[gmmu_page_size_small]))
232 gk20a_alloc_destroy(&vm->vma[gmmu_page_size_small]); 232 nvgpu_alloc_destroy(&vm->vma[gmmu_page_size_small]);
233 if (gk20a_alloc_initialized(&vm->vma[gmmu_page_size_big])) 233 if (nvgpu_alloc_initialized(&vm->vma[gmmu_page_size_big]))
234 gk20a_alloc_destroy(&vm->vma[gmmu_page_size_big]); 234 nvgpu_alloc_destroy(&vm->vma[gmmu_page_size_big]);
235 235
236 mutex_unlock(&vm->update_gmmu_lock); 236 mutex_unlock(&vm->update_gmmu_lock);
237 237
@@ -370,7 +370,7 @@ static int vgpu_vm_alloc_share(struct gk20a_as_share *as_share,
370 snprintf(name, sizeof(name), "gk20a_as_%d-%dKB", as_share->id, 370 snprintf(name, sizeof(name), "gk20a_as_%d-%dKB", as_share->id,
371 gmmu_page_sizes[gmmu_page_size_small] >> 10); 371 gmmu_page_sizes[gmmu_page_size_small] >> 10);
372 372
373 err = __gk20a_buddy_allocator_init( 373 err = __nvgpu_buddy_allocator_init(
374 g, 374 g,
375 &vm->vma[gmmu_page_size_small], 375 &vm->vma[gmmu_page_size_small],
376 vm, name, 376 vm, name,
@@ -386,7 +386,7 @@ static int vgpu_vm_alloc_share(struct gk20a_as_share *as_share,
386 if (large_vma_start < large_vma_limit) { 386 if (large_vma_start < large_vma_limit) {
387 snprintf(name, sizeof(name), "gk20a_as_%d-%dKB", as_share->id, 387 snprintf(name, sizeof(name), "gk20a_as_%d-%dKB", as_share->id,
388 gmmu_page_sizes[gmmu_page_size_big] >> 10); 388 gmmu_page_sizes[gmmu_page_size_big] >> 10);
389 err = __gk20a_buddy_allocator_init( 389 err = __nvgpu_buddy_allocator_init(
390 g, 390 g,
391 &vm->vma[gmmu_page_size_big], 391 &vm->vma[gmmu_page_size_big],
392 vm, name, 392 vm, name,
@@ -404,7 +404,7 @@ static int vgpu_vm_alloc_share(struct gk20a_as_share *as_share,
404 /* 404 /*
405 * kernel reserved VMA is at the end of the aperture 405 * kernel reserved VMA is at the end of the aperture
406 */ 406 */
407 err = __gk20a_buddy_allocator_init( 407 err = __nvgpu_buddy_allocator_init(
408 g, 408 g,
409 &vm->vma[gmmu_page_size_kernel], 409 &vm->vma[gmmu_page_size_kernel],
410 vm, name, 410 vm, name,
@@ -428,10 +428,10 @@ static int vgpu_vm_alloc_share(struct gk20a_as_share *as_share,
428 428
429clean_up_big_allocator: 429clean_up_big_allocator:
430 if (large_vma_start < large_vma_limit) 430 if (large_vma_start < large_vma_limit)
431 gk20a_alloc_destroy(&vm->vma[gmmu_page_size_big]); 431 nvgpu_alloc_destroy(&vm->vma[gmmu_page_size_big]);
432clean_up_small_allocator: 432clean_up_small_allocator:
433 if (small_vma_start < small_vma_limit) 433 if (small_vma_start < small_vma_limit)
434 gk20a_alloc_destroy(&vm->vma[gmmu_page_size_small]); 434 nvgpu_alloc_destroy(&vm->vma[gmmu_page_size_small]);
435clean_up_share: 435clean_up_share:
436 msg.cmd = TEGRA_VGPU_CMD_AS_FREE_SHARE; 436 msg.cmd = TEGRA_VGPU_CMD_AS_FREE_SHARE;
437 msg.handle = vgpu_get_handle(g); 437 msg.handle = vgpu_get_handle(g);