author    Alex Waterman <alexw@nvidia.com>    2017-03-08 19:51:33 -0500
committer mobile promotions <svcmobile_promotions@nvidia.com>    2017-03-26 12:55:10 -0400
commit    c11228d48be1825e1ec84afd38c6938504fa4100 (patch)
tree      ea8bb9c874ba14b7c06a4de11d6619f88e2a4104 /drivers/gpu
parent    e0f2afe5eb43fb32490ccabd504879c3e3e54623 (diff)
gpu: nvgpu: Use new kmem API functions (common/*)
Use the new kmem API functions in common/* and common/mm/*. Add a
struct gk20a pointer to struct nvgpu_allocator in order to store the
gk20a pointer used for allocating memory.

Bug 1799159
Bug 1823380

Change-Id: I881ea9545e8a8f0b75d77a1e35dd1812e0bb654e
Signed-off-by: Alex Waterman <alexw@nvidia.com>
Reviewed-on: http://git-master/r/1318315
Reviewed-by: svccoveritychecker <svccoveritychecker@nvidia.com>
GVS: Gerrit_Virtual_Submit
Reviewed-by: Terje Bergstrom <tbergstrom@nvidia.com>
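The conversion this series performs is mechanical: kzalloc()/kmalloc()/kcalloc()/kfree() calls using GFP_KERNEL become nvgpu_kzalloc()/nvgpu_kmalloc()/nvgpu_kcalloc()/nvgpu_kfree() calls that take the owning struct gk20a as their first argument, and allocator back-ends recover that pointer via nvgpu_alloc_to_gpu() when only the struct nvgpu_allocator is in scope. A minimal sketch of the pattern follows; my_backend and its fields are invented for illustration, while the nvgpu_k* and nvgpu_alloc_to_gpu() signatures follow the calls visible in this diff.

#include <linux/bitops.h>	/* BITS_TO_LONGS() */

#include <nvgpu/allocator.h>
#include <nvgpu/kmem.h>

/* Hypothetical back-end used only to illustrate the new kmem calls. */
struct my_backend {
	unsigned long *bitmap;
};

static int my_backend_init(struct gk20a *g, struct nvgpu_allocator *na,
			   unsigned long nbits)
{
	struct my_backend *b;

	/* Was: b = kzalloc(sizeof(*b), GFP_KERNEL); */
	b = nvgpu_kzalloc(g, sizeof(*b));
	if (!b)
		return -ENOMEM;

	/* Was: kcalloc(BITS_TO_LONGS(nbits), sizeof(*b->bitmap), GFP_KERNEL); */
	b->bitmap = nvgpu_kcalloc(g, BITS_TO_LONGS(nbits), sizeof(*b->bitmap));
	if (!b->bitmap) {
		nvgpu_kfree(g, b);
		return -ENOMEM;
	}

	na->priv = b;
	return 0;
}

static void my_backend_destroy(struct nvgpu_allocator *na)
{
	struct my_backend *b = na->priv;
	/*
	 * The allocator now carries its gk20a, so free paths that only see
	 * the nvgpu_allocator can still pass the right gk20a to nvgpu_kfree().
	 */
	struct gk20a *g = nvgpu_alloc_to_gpu(na);

	nvgpu_kfree(g, b->bitmap);
	nvgpu_kfree(g, b);
}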
Diffstat (limited to 'drivers/gpu')
-rw-r--r--  drivers/gpu/nvgpu/common/mm/bitmap_allocator.c       24
-rw-r--r--  drivers/gpu/nvgpu/common/mm/buddy_allocator.c        15
-rw-r--r--  drivers/gpu/nvgpu/common/mm/buddy_allocator_priv.h    3
-rw-r--r--  drivers/gpu/nvgpu/common/mm/lockless_allocator.c     11
-rw-r--r--  drivers/gpu/nvgpu/common/mm/nvgpu_allocator.c         3
-rw-r--r--  drivers/gpu/nvgpu/common/mm/page_allocator.c         15
-rw-r--r--  drivers/gpu/nvgpu/common/nvgpu_common.c              12
-rw-r--r--  drivers/gpu/nvgpu/common/semaphore.c                 31
-rw-r--r--  drivers/gpu/nvgpu/include/nvgpu/allocator.h          10
9 files changed, 71 insertions, 53 deletions
diff --git a/drivers/gpu/nvgpu/common/mm/bitmap_allocator.c b/drivers/gpu/nvgpu/common/mm/bitmap_allocator.c
index 6fc508d6..6e3bad6f 100644
--- a/drivers/gpu/nvgpu/common/mm/bitmap_allocator.c
+++ b/drivers/gpu/nvgpu/common/mm/bitmap_allocator.c
@@ -19,6 +19,7 @@
 #include <linux/bitops.h>
 
 #include <nvgpu/allocator.h>
+#include <nvgpu/kmem.h>
 
 #include "bitmap_allocator_priv.h"
 
@@ -248,12 +249,11 @@ static u64 nvgpu_bitmap_alloc(struct nvgpu_allocator *__a, u64 len)
 
 	/*
 	 * Only do meta-data storage if we are allowed to allocate storage for
-	 * that meta-data. The issue with using kmalloc() and friends is that
-	 * in latency and success critical paths an alloc_page() call can either
-	 * sleep for potentially a long time or, assuming GFP_ATOMIC, fail.
-	 * Since we might not want either of these possibilities assume that the
-	 * caller will keep what data it needs around to successfully free this
-	 * allocation.
+	 * that meta-data. The issue with using malloc and friends is that
+	 * in latency and success critical paths an alloc_page() call can either
+	 * sleep for potentially a long time or fail. Since we might not want
+	 * either of these possibilities assume that the caller will keep what
+	 * data it needs around to successfully free this allocation.
 	 */
 	if (!(a->flags & GPU_ALLOC_NO_ALLOC_PAGE) &&
 	    __nvgpu_bitmap_store_alloc(a, addr, blks * a->blk_size))
@@ -332,8 +332,8 @@ static void nvgpu_bitmap_alloc_destroy(struct nvgpu_allocator *__a)
 	}
 
 	nvgpu_kmem_cache_destroy(a->meta_data_cache);
-	kfree(a->bitmap);
-	kfree(a);
+	nvgpu_kfree(nvgpu_alloc_to_gpu(__a), a->bitmap);
+	nvgpu_kfree(nvgpu_alloc_to_gpu(__a), a);
 }
 
 static void nvgpu_bitmap_print_stats(struct nvgpu_allocator *__a,
@@ -397,11 +397,11 @@ int nvgpu_bitmap_allocator_init(struct gk20a *g, struct nvgpu_allocator *__a,
 		length -= blk_size;
 	}
 
-	a = kzalloc(sizeof(struct nvgpu_bitmap_allocator), GFP_KERNEL);
+	a = nvgpu_kzalloc(g, sizeof(struct nvgpu_bitmap_allocator));
 	if (!a)
 		return -ENOMEM;
 
-	err = __nvgpu_alloc_common_init(__a, name, a, false, &bitmap_ops);
+	err = __nvgpu_alloc_common_init(__a, g, name, a, false, &bitmap_ops);
 	if (err)
 		goto fail;
 
@@ -422,8 +422,8 @@ int nvgpu_bitmap_allocator_init(struct gk20a *g, struct nvgpu_allocator *__a,
 	a->bit_offs = a->base >> a->blk_shift;
 	a->flags = flags;
 
-	a->bitmap = kcalloc(BITS_TO_LONGS(a->num_bits), sizeof(*a->bitmap),
-			    GFP_KERNEL);
+	a->bitmap = nvgpu_kcalloc(g, BITS_TO_LONGS(a->num_bits),
+				  sizeof(*a->bitmap));
 	if (!a->bitmap) {
 		err = -ENOMEM;
 		goto fail;
@@ -445,6 +445,6 @@ int nvgpu_bitmap_allocator_init(struct gk20a *g, struct nvgpu_allocator *__a,
 fail:
 	if (a->meta_data_cache)
 		nvgpu_kmem_cache_destroy(a->meta_data_cache);
-	kfree(a);
+	nvgpu_kfree(g, a);
 	return err;
 }
diff --git a/drivers/gpu/nvgpu/common/mm/buddy_allocator.c b/drivers/gpu/nvgpu/common/mm/buddy_allocator.c
index 6f4c670a..246be974 100644
--- a/drivers/gpu/nvgpu/common/mm/buddy_allocator.c
+++ b/drivers/gpu/nvgpu/common/mm/buddy_allocator.c
@@ -18,6 +18,7 @@
 #include <linux/slab.h>
 
 #include <nvgpu/allocator.h>
+#include <nvgpu/kmem.h>
 
 #include "gk20a/mm_gk20a.h"
 #include "gk20a/platform_gk20a.h"
@@ -304,7 +305,7 @@ static void nvgpu_buddy_allocator_destroy(struct nvgpu_allocator *__a)
 	}
 
 	nvgpu_kmem_cache_destroy(a->buddy_cache);
-	kfree(a);
+	nvgpu_kfree(nvgpu_alloc_to_gpu(__a), a);
 
 	alloc_unlock(__a);
 }
@@ -809,7 +810,7 @@ static void __balloc_do_free_fixed(struct nvgpu_buddy_allocator *a,
 		balloc_coalesce(a, bud);
 	}
 
-	kfree(falloc);
+	nvgpu_kfree(nvgpu_alloc_to_gpu(a->owner), falloc);
 }
 
 /*
@@ -893,7 +894,7 @@ static u64 __nvgpu_balloc_fixed_buddy(struct nvgpu_allocator *__a,
 		goto fail;
 	}
 
-	falloc = kmalloc(sizeof(*falloc), GFP_KERNEL);
+	falloc = nvgpu_kmalloc(nvgpu_alloc_to_gpu(__a), sizeof(*falloc));
 	if (!falloc)
 		goto fail;
 
@@ -932,7 +933,7 @@ static u64 __nvgpu_balloc_fixed_buddy(struct nvgpu_allocator *__a,
 fail_unlock:
 	alloc_unlock(__a);
 fail:
-	kfree(falloc);
+	nvgpu_kfree(nvgpu_alloc_to_gpu(__a), falloc);
 	nvgpu_alloc_trace_func_done();
 	return 0;
 }
@@ -1261,11 +1262,11 @@ int __nvgpu_buddy_allocator_init(struct gk20a *g, struct nvgpu_allocator *__a,
 	if (flags & GPU_ALLOC_GVA_SPACE && !vm)
 		return -EINVAL;
 
-	a = kzalloc(sizeof(struct nvgpu_buddy_allocator), GFP_KERNEL);
+	a = nvgpu_kzalloc(g, sizeof(struct nvgpu_buddy_allocator));
 	if (!a)
 		return -ENOMEM;
 
-	err = __nvgpu_alloc_common_init(__a, name, a, false, &buddy_ops);
+	err = __nvgpu_alloc_common_init(__a, g, name, a, false, &buddy_ops);
 	if (err)
 		goto fail;
 
@@ -1339,7 +1340,7 @@ int __nvgpu_buddy_allocator_init(struct gk20a *g, struct nvgpu_allocator *__a,
 fail:
 	if (a->buddy_cache)
 		nvgpu_kmem_cache_destroy(a->buddy_cache);
-	kfree(a);
+	nvgpu_kfree(g, a);
 	return err;
 }
 
diff --git a/drivers/gpu/nvgpu/common/mm/buddy_allocator_priv.h b/drivers/gpu/nvgpu/common/mm/buddy_allocator_priv.h
index 56aaea62..935b3f1c 100644
--- a/drivers/gpu/nvgpu/common/mm/buddy_allocator_priv.h
+++ b/drivers/gpu/nvgpu/common/mm/buddy_allocator_priv.h
@@ -20,8 +20,7 @@
 #include <linux/list.h>
 #include <linux/rbtree.h>
 
-#include <nvgpu/kmem.h>
-
+struct nvgpu_kmem_cache;
 struct nvgpu_allocator;
 struct vm_gk20a;
 
diff --git a/drivers/gpu/nvgpu/common/mm/lockless_allocator.c b/drivers/gpu/nvgpu/common/mm/lockless_allocator.c
index e3063a42..6fd9bc48 100644
--- a/drivers/gpu/nvgpu/common/mm/lockless_allocator.c
+++ b/drivers/gpu/nvgpu/common/mm/lockless_allocator.c
@@ -20,6 +20,7 @@
 #include <linux/atomic.h>
 
 #include <nvgpu/allocator.h>
+#include <nvgpu/kmem.h>
 
 #include "lockless_allocator_priv.h"
 
@@ -106,7 +107,7 @@ static void nvgpu_lockless_alloc_destroy(struct nvgpu_allocator *a)
 	nvgpu_fini_alloc_debug(a);
 
 	vfree(pa->next);
-	kfree(pa);
+	nvgpu_kfree(nvgpu_alloc_to_gpu(a), pa);
 }
 
 static void nvgpu_lockless_print_stats(struct nvgpu_allocator *a,
@@ -154,7 +155,7 @@ int nvgpu_lockless_allocator_init(struct gk20a *g, struct nvgpu_allocator *__a,
 		return -EINVAL;
 
 	/*
-	 * Ensure we have space for atleast one node & there's no overflow.
+	 * Ensure we have space for at least one node & there's no overflow.
 	 * In order to control memory footprint, we require count < INT_MAX
 	 */
 	count = length;
@@ -162,11 +163,11 @@ int nvgpu_lockless_allocator_init(struct gk20a *g, struct nvgpu_allocator *__a,
 	if (!base || !count || count > INT_MAX)
 		return -EINVAL;
 
-	a = kzalloc(sizeof(struct nvgpu_lockless_allocator), GFP_KERNEL);
+	a = nvgpu_kzalloc(g, sizeof(struct nvgpu_lockless_allocator));
 	if (!a)
 		return -ENOMEM;
 
-	err = __nvgpu_alloc_common_init(__a, name, a, false, &pool_ops);
+	err = __nvgpu_alloc_common_init(__a, g, name, a, false, &pool_ops);
 	if (err)
 		goto fail;
 
@@ -202,6 +203,6 @@ int nvgpu_lockless_allocator_init(struct gk20a *g, struct nvgpu_allocator *__a,
 	return 0;
 
 fail:
-	kfree(a);
+	nvgpu_kfree(g, a);
 	return err;
 }
diff --git a/drivers/gpu/nvgpu/common/mm/nvgpu_allocator.c b/drivers/gpu/nvgpu/common/mm/nvgpu_allocator.c
index 24a76a0b..02b7b48d 100644
--- a/drivers/gpu/nvgpu/common/mm/nvgpu_allocator.c
+++ b/drivers/gpu/nvgpu/common/mm/nvgpu_allocator.c
@@ -123,7 +123,7 @@ void nvgpu_alloc_destroy(struct nvgpu_allocator *a)
 /*
  * Handle the common init stuff for a nvgpu_allocator.
  */
-int __nvgpu_alloc_common_init(struct nvgpu_allocator *a,
+int __nvgpu_alloc_common_init(struct nvgpu_allocator *a, struct gk20a *g,
 			      const char *name, void *priv, bool dbg,
 			      const struct nvgpu_allocator_ops *ops)
 {
@@ -143,6 +143,7 @@ int __nvgpu_alloc_common_init(struct nvgpu_allocator *a,
 	if (err)
 		return err;
 
+	a->g = g;
 	a->ops = ops;
 	a->priv = priv;
 	a->debug = dbg;
diff --git a/drivers/gpu/nvgpu/common/mm/page_allocator.c b/drivers/gpu/nvgpu/common/mm/page_allocator.c
index 193decc9..7d2cedc9 100644
--- a/drivers/gpu/nvgpu/common/mm/page_allocator.c
+++ b/drivers/gpu/nvgpu/common/mm/page_allocator.c
@@ -21,6 +21,7 @@
 
 #include <nvgpu/allocator.h>
 #include <nvgpu/page_allocator.h>
+#include <nvgpu/kmem.h>
 
 #include "buddy_allocator_priv.h"
 
@@ -760,7 +761,7 @@ static void nvgpu_page_allocator_destroy(struct nvgpu_allocator *__a)
 	struct nvgpu_page_allocator *a = page_allocator(__a);
 
 	alloc_lock(__a);
-	kfree(a);
+	nvgpu_kfree(nvgpu_alloc_to_gpu(__a), a);
 	__a->priv = NULL;
 	alloc_unlock(__a);
 }
@@ -848,9 +849,9 @@ static int nvgpu_page_alloc_init_slabs(struct nvgpu_page_allocator *a)
 	size_t nr_slabs = ilog2(a->page_size >> 12);
 	unsigned int i;
 
-	a->slabs = kcalloc(nr_slabs,
-			   sizeof(struct page_alloc_slab),
-			   GFP_KERNEL);
+	a->slabs = nvgpu_kcalloc(nvgpu_alloc_to_gpu(a->owner),
+				 nr_slabs,
+				 sizeof(struct page_alloc_slab));
 	if (!a->slabs)
 		return -ENOMEM;
 	a->nr_slabs = nr_slabs;
@@ -881,11 +882,11 @@ int nvgpu_page_allocator_init(struct gk20a *g, struct nvgpu_allocator *__a,
 	if (blk_size < SZ_4K)
 		return -EINVAL;
 
-	a = kzalloc(sizeof(struct nvgpu_page_allocator), GFP_KERNEL);
+	a = nvgpu_kzalloc(g, sizeof(struct nvgpu_page_allocator));
 	if (!a)
 		return -ENOMEM;
 
-	err = __nvgpu_alloc_common_init(__a, name, a, false, &page_ops);
+	err = __nvgpu_alloc_common_init(__a, g, name, a, false, &page_ops);
 	if (err)
 		goto fail;
 
@@ -938,6 +939,6 @@ fail:
 		nvgpu_kmem_cache_destroy(a->chunk_cache);
 	if (a->slab_page_cache)
 		nvgpu_kmem_cache_destroy(a->slab_page_cache);
-	kfree(a);
+	nvgpu_kfree(g, a);
 	return err;
 }
diff --git a/drivers/gpu/nvgpu/common/nvgpu_common.c b/drivers/gpu/nvgpu/common/nvgpu_common.c
index d7ff4841..771d4121 100644
--- a/drivers/gpu/nvgpu/common/nvgpu_common.c
+++ b/drivers/gpu/nvgpu/common/nvgpu_common.c
@@ -17,11 +17,12 @@
 #include <linux/dma-mapping.h>
 #include <linux/firmware.h>
 
+#include <nvgpu/kmem.h>
+#include <nvgpu/nvgpu_common.h>
+
 #include "gk20a/gk20a_scale.h"
 #include "gk20a/gk20a.h"
 
-#include <nvgpu/nvgpu_common.h>
-
 #define EMC3D_DEFAULT_RATIO 750
 
 static void nvgpu_init_vars(struct gk20a *g)
@@ -164,7 +165,7 @@ int nvgpu_probe(struct gk20a *g,
 	gk20a_create_sysfs(g->dev);
 	gk20a_debug_init(g->dev, debugfs_symlink);
 
-	g->dbg_regops_tmp_buf = kzalloc(SZ_4K, GFP_KERNEL);
+	g->dbg_regops_tmp_buf = nvgpu_kzalloc(g, SZ_4K);
 	if (!g->dbg_regops_tmp_buf) {
 		dev_err(g->dev, "couldn't allocate regops tmp buf");
 		return -ENOMEM;
@@ -190,7 +191,8 @@ static const struct firmware *do_request_firmware(struct device *dev,
 	path_len = strlen(prefix) + strlen(fw_name);
 	path_len += 2; /* for the path separator and zero terminator*/
 
-	fw_path = kzalloc(sizeof(*fw_path) * path_len, GFP_KERNEL);
+	fw_path = nvgpu_kzalloc(get_gk20a(dev),
+				sizeof(*fw_path) * path_len);
 	if (!fw_path)
 		return NULL;
 
@@ -207,7 +209,7 @@ static const struct firmware *do_request_firmware(struct device *dev,
 	err = request_firmware(&fw, fw_name, dev);
 #endif
 
-	kfree(fw_path);
+	nvgpu_kfree(get_gk20a(dev), fw_path);
 	if (err)
 		return NULL;
 	return fw;
diff --git a/drivers/gpu/nvgpu/common/semaphore.c b/drivers/gpu/nvgpu/common/semaphore.c
index ff86ada9..675794d1 100644
--- a/drivers/gpu/nvgpu/common/semaphore.c
+++ b/drivers/gpu/nvgpu/common/semaphore.c
@@ -20,6 +20,10 @@
 #include <linux/slab.h>
 
 #include <nvgpu/semaphore.h>
+#include <nvgpu/kmem.h>
+
+#include "gk20a/gk20a.h"
+#include "gk20a/mm_gk20a.h"
 
 #define __lock_sema_sea(s) \
 	do { \
@@ -83,7 +87,7 @@ struct nvgpu_semaphore_sea *nvgpu_semaphore_sea_create(struct gk20a *g)
 	if (g->sema_sea)
 		return g->sema_sea;
 
-	g->sema_sea = kzalloc(sizeof(*g->sema_sea), GFP_KERNEL);
+	g->sema_sea = nvgpu_kzalloc(g, sizeof(*g->sema_sea));
 	if (!g->sema_sea)
 		return NULL;
 
@@ -103,7 +107,7 @@ struct nvgpu_semaphore_sea *nvgpu_semaphore_sea_create(struct gk20a *g)
 cleanup_destroy:
 	nvgpu_mutex_destroy(&g->sema_sea->sea_lock);
 cleanup_free:
-	kfree(g->sema_sea);
+	nvgpu_kfree(g, g->sema_sea);
 	g->sema_sea = NULL;
 	gpu_sema_dbg("Failed to creat semaphore sea!");
 	return NULL;
@@ -131,7 +135,7 @@ struct nvgpu_semaphore_pool *nvgpu_semaphore_pool_alloc(
 	unsigned long page_idx;
 	int ret, err = 0;
 
-	p = kzalloc(sizeof(*p), GFP_KERNEL);
+	p = nvgpu_kzalloc(sea->gk20a, sizeof(*p));
 	if (!p)
 		return ERR_PTR(-ENOMEM);
 
@@ -168,7 +172,7 @@ fail_alloc:
 	nvgpu_mutex_destroy(&p->pool_lock);
 fail:
 	__unlock_sema_sea(sea);
-	kfree(p);
+	nvgpu_kfree(sea->gk20a, p);
 	gpu_sema_dbg("Failed to allocate semaphore pool!");
 	return ERR_PTR(err);
 }
@@ -191,7 +195,8 @@ int nvgpu_semaphore_pool_map(struct nvgpu_semaphore_pool *p,
 	gpu_sema_dbg("  %d: CPU VA = 0x%p!", p->page_idx, p->cpu_va);
 
 	/* First do the RW mapping. */
-	p->rw_sg_table = kzalloc(sizeof(*p->rw_sg_table), GFP_KERNEL);
+	p->rw_sg_table = nvgpu_kzalloc(p->sema_sea->gk20a,
+				       sizeof(*p->rw_sg_table));
 	if (!p->rw_sg_table)
 		return -ENOMEM;
 
@@ -261,7 +266,7 @@ fail_unmap_sgt:
 fail_free_sgt:
 	sg_free_table(p->rw_sg_table);
 fail:
-	kfree(p->rw_sg_table);
+	nvgpu_kfree(p->sema_sea->gk20a, p->rw_sg_table);
 	p->rw_sg_table = NULL;
 	gpu_sema_dbg("  %d: Failed to map semaphore pool!", p->page_idx);
 	return err;
@@ -292,7 +297,7 @@ void nvgpu_semaphore_pool_unmap(struct nvgpu_semaphore_pool *p,
 			DMA_BIDIRECTIONAL);
 
 	sg_free_table(p->rw_sg_table);
-	kfree(p->rw_sg_table);
+	nvgpu_kfree(p->sema_sea->gk20a, p->rw_sg_table);
 	p->rw_sg_table = NULL;
 
 	list_for_each_entry(hw_sema, &p->hw_semas, hw_sema_list)
@@ -325,12 +330,12 @@ static void nvgpu_semaphore_pool_free(struct kref *ref)
 	__unlock_sema_sea(s);
 
 	list_for_each_entry_safe(hw_sema, tmp, &p->hw_semas, hw_sema_list)
-		kfree(hw_sema);
+		nvgpu_kfree(p->sema_sea->gk20a, hw_sema);
 
 	nvgpu_mutex_destroy(&p->pool_lock);
 
 	gpu_sema_dbg("Freed semaphore pool! (idx=%d)", p->page_idx);
-	kfree(p);
+	nvgpu_kfree(p->sema_sea->gk20a, p);
 }
 
 void nvgpu_semaphore_pool_get(struct nvgpu_semaphore_pool *p)
@@ -374,7 +379,7 @@ static int __nvgpu_init_hw_sema(struct channel_gk20a *ch)
 		goto fail;
 	}
 
-	hw_sema = kzalloc(sizeof(struct nvgpu_semaphore_int), GFP_KERNEL);
+	hw_sema = nvgpu_kzalloc(ch->g, sizeof(struct nvgpu_semaphore_int));
 	if (!hw_sema) {
 		ret = -ENOMEM;
 		goto fail_free_idx;
@@ -417,7 +422,7 @@ void nvgpu_semaphore_free_hw_sema(struct channel_gk20a *ch)
 
 	/* Make sure that when the ch is re-opened it will get a new HW sema. */
 	list_del(&ch->hw_sema->hw_sema_list);
-	kfree(ch->hw_sema);
+	nvgpu_kfree(ch->g, ch->hw_sema);
 	ch->hw_sema = NULL;
 
 	nvgpu_mutex_release(&p->pool_lock);
@@ -440,7 +445,7 @@ struct nvgpu_semaphore *nvgpu_semaphore_alloc(struct channel_gk20a *ch)
 		return NULL;
 	}
 
-	s = kzalloc(sizeof(*s), GFP_KERNEL);
+	s = nvgpu_kzalloc(ch->g, sizeof(*s));
 	if (!s)
 		return NULL;
 
@@ -466,7 +471,7 @@ static void nvgpu_semaphore_free(struct kref *ref)
 
 	nvgpu_semaphore_pool_put(s->hw_sema->p);
 
-	kfree(s);
+	nvgpu_kfree(s->hw_sema->ch->g, s);
 }
 
 void nvgpu_semaphore_put(struct nvgpu_semaphore *s)
diff --git a/drivers/gpu/nvgpu/include/nvgpu/allocator.h b/drivers/gpu/nvgpu/include/nvgpu/allocator.h
index 16fe2641..1bde290f 100644
--- a/drivers/gpu/nvgpu/include/nvgpu/allocator.h
+++ b/drivers/gpu/nvgpu/include/nvgpu/allocator.h
@@ -20,6 +20,7 @@
 #include <linux/debugfs.h>
 #include <linux/seq_file.h>
 #include <linux/platform_device.h>
+
 #include <nvgpu/lock.h>
 
 /* #define ALLOCATOR_DEBUG */
@@ -78,6 +79,8 @@ struct nvgpu_allocator_ops {
 };
 
 struct nvgpu_allocator {
+	struct gk20a *g;
+
 	char name[32];
 	struct nvgpu_mutex lock;
 
@@ -238,13 +241,18 @@ void nvgpu_alloc_destroy(struct nvgpu_allocator *allocator);
 void nvgpu_alloc_print_stats(struct nvgpu_allocator *a,
 			     struct seq_file *s, int lock);
 
+static inline struct gk20a *nvgpu_alloc_to_gpu(struct nvgpu_allocator *a)
+{
+	return a->g;
+}
+
 /*
  * Common functionality for the internals of the allocators.
  */
 void nvgpu_init_alloc_debug(struct gk20a *g, struct nvgpu_allocator *a);
 void nvgpu_fini_alloc_debug(struct nvgpu_allocator *a);
 
-int __nvgpu_alloc_common_init(struct nvgpu_allocator *a,
+int __nvgpu_alloc_common_init(struct nvgpu_allocator *a, struct gk20a *g,
 			      const char *name, void *priv, bool dbg,
 			      const struct nvgpu_allocator_ops *ops);
 
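The header change above is what makes the nvgpu_alloc_to_gpu() calls elsewhere in this commit possible: each allocator records its owning struct gk20a at init time via the new parameter to __nvgpu_alloc_common_init(). A minimal sketch of that init flow follows; my_allocator_init, my_ops, and struct my_priv are hypothetical names, not part of this change.

#include <linux/types.h>

#include <nvgpu/allocator.h>
#include <nvgpu/kmem.h>

/* Hypothetical ops table and private struct, for illustration only. */
static const struct nvgpu_allocator_ops my_ops;

struct my_priv {
	u64 base;
	u64 length;
};

int my_allocator_init(struct gk20a *g, struct nvgpu_allocator *na,
		      const char *name, u64 base, u64 length)
{
	struct my_priv *p;
	int err;

	p = nvgpu_kzalloc(g, sizeof(*p));
	if (!p)
		return -ENOMEM;

	/*
	 * New signature: g is passed alongside the allocator and stored in
	 * na->g by the common init code, so later free/destroy paths can
	 * fetch it with nvgpu_alloc_to_gpu(na).
	 */
	err = __nvgpu_alloc_common_init(na, g, name, p, false, &my_ops);
	if (err) {
		nvgpu_kfree(g, p);
		return err;
	}

	p->base = base;
	p->length = length;
	return 0;
}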