diff options
author | Alex Waterman <alexw@nvidia.com> | 2017-03-08 19:51:33 -0500 |
---|---|---|
committer | mobile promotions <svcmobile_promotions@nvidia.com> | 2017-03-26 12:55:10 -0400 |
commit | c11228d48be1825e1ec84afd38c6938504fa4100 (patch) | |
tree | ea8bb9c874ba14b7c06a4de11d6619f88e2a4104 /drivers/gpu/nvgpu/common/semaphore.c | |
parent | e0f2afe5eb43fb32490ccabd504879c3e3e54623 (diff) |
gpu: nvgpu: Use new kmem API functions (common/*)
Use the new kmem API functions in common/* and common/mm/*.
Add a struct gk20a pointer to struct nvgpu_allocator in order
to store the gk20a pointer used for allocating memory.
Bug 1799159
Bug 1823380
Change-Id: I881ea9545e8a8f0b75d77a1e35dd1812e0bb654e
Signed-off-by: Alex Waterman <alexw@nvidia.com>
Reviewed-on: http://git-master/r/1318315
Reviewed-by: svccoveritychecker <svccoveritychecker@nvidia.com>
GVS: Gerrit_Virtual_Submit
Reviewed-by: Terje Bergstrom <tbergstrom@nvidia.com>
Diffstat (limited to 'drivers/gpu/nvgpu/common/semaphore.c')
-rw-r--r-- | drivers/gpu/nvgpu/common/semaphore.c | 31 |
1 file changed, 18 insertions, 13 deletions
diff --git a/drivers/gpu/nvgpu/common/semaphore.c b/drivers/gpu/nvgpu/common/semaphore.c index ff86ada9..675794d1 100644 --- a/drivers/gpu/nvgpu/common/semaphore.c +++ b/drivers/gpu/nvgpu/common/semaphore.c | |||
@@ -20,6 +20,10 @@ | |||
20 | #include <linux/slab.h> | 20 | #include <linux/slab.h> |
21 | 21 | ||
22 | #include <nvgpu/semaphore.h> | 22 | #include <nvgpu/semaphore.h> |
23 | #include <nvgpu/kmem.h> | ||
24 | |||
25 | #include "gk20a/gk20a.h" | ||
26 | #include "gk20a/mm_gk20a.h" | ||
23 | 27 | ||
24 | #define __lock_sema_sea(s) \ | 28 | #define __lock_sema_sea(s) \ |
25 | do { \ | 29 | do { \ |
@@ -83,7 +87,7 @@ struct nvgpu_semaphore_sea *nvgpu_semaphore_sea_create(struct gk20a *g) | |||
83 | if (g->sema_sea) | 87 | if (g->sema_sea) |
84 | return g->sema_sea; | 88 | return g->sema_sea; |
85 | 89 | ||
86 | g->sema_sea = kzalloc(sizeof(*g->sema_sea), GFP_KERNEL); | 90 | g->sema_sea = nvgpu_kzalloc(g, sizeof(*g->sema_sea)); |
87 | if (!g->sema_sea) | 91 | if (!g->sema_sea) |
88 | return NULL; | 92 | return NULL; |
89 | 93 | ||
@@ -103,7 +107,7 @@ struct nvgpu_semaphore_sea *nvgpu_semaphore_sea_create(struct gk20a *g) | |||
103 | cleanup_destroy: | 107 | cleanup_destroy: |
104 | nvgpu_mutex_destroy(&g->sema_sea->sea_lock); | 108 | nvgpu_mutex_destroy(&g->sema_sea->sea_lock); |
105 | cleanup_free: | 109 | cleanup_free: |
106 | kfree(g->sema_sea); | 110 | nvgpu_kfree(g, g->sema_sea); |
107 | g->sema_sea = NULL; | 111 | g->sema_sea = NULL; |
108 | gpu_sema_dbg("Failed to creat semaphore sea!"); | 112 | gpu_sema_dbg("Failed to creat semaphore sea!"); |
109 | return NULL; | 113 | return NULL; |
@@ -131,7 +135,7 @@ struct nvgpu_semaphore_pool *nvgpu_semaphore_pool_alloc( | |||
131 | unsigned long page_idx; | 135 | unsigned long page_idx; |
132 | int ret, err = 0; | 136 | int ret, err = 0; |
133 | 137 | ||
134 | p = kzalloc(sizeof(*p), GFP_KERNEL); | 138 | p = nvgpu_kzalloc(sea->gk20a, sizeof(*p)); |
135 | if (!p) | 139 | if (!p) |
136 | return ERR_PTR(-ENOMEM); | 140 | return ERR_PTR(-ENOMEM); |
137 | 141 | ||
@@ -168,7 +172,7 @@ fail_alloc: | |||
168 | nvgpu_mutex_destroy(&p->pool_lock); | 172 | nvgpu_mutex_destroy(&p->pool_lock); |
169 | fail: | 173 | fail: |
170 | __unlock_sema_sea(sea); | 174 | __unlock_sema_sea(sea); |
171 | kfree(p); | 175 | nvgpu_kfree(sea->gk20a, p); |
172 | gpu_sema_dbg("Failed to allocate semaphore pool!"); | 176 | gpu_sema_dbg("Failed to allocate semaphore pool!"); |
173 | return ERR_PTR(err); | 177 | return ERR_PTR(err); |
174 | } | 178 | } |
@@ -191,7 +195,8 @@ int nvgpu_semaphore_pool_map(struct nvgpu_semaphore_pool *p, | |||
191 | gpu_sema_dbg(" %d: CPU VA = 0x%p!", p->page_idx, p->cpu_va); | 195 | gpu_sema_dbg(" %d: CPU VA = 0x%p!", p->page_idx, p->cpu_va); |
192 | 196 | ||
193 | /* First do the RW mapping. */ | 197 | /* First do the RW mapping. */ |
194 | p->rw_sg_table = kzalloc(sizeof(*p->rw_sg_table), GFP_KERNEL); | 198 | p->rw_sg_table = nvgpu_kzalloc(p->sema_sea->gk20a, |
199 | sizeof(*p->rw_sg_table)); | ||
195 | if (!p->rw_sg_table) | 200 | if (!p->rw_sg_table) |
196 | return -ENOMEM; | 201 | return -ENOMEM; |
197 | 202 | ||
@@ -261,7 +266,7 @@ fail_unmap_sgt: | |||
261 | fail_free_sgt: | 266 | fail_free_sgt: |
262 | sg_free_table(p->rw_sg_table); | 267 | sg_free_table(p->rw_sg_table); |
263 | fail: | 268 | fail: |
264 | kfree(p->rw_sg_table); | 269 | nvgpu_kfree(p->sema_sea->gk20a, p->rw_sg_table); |
265 | p->rw_sg_table = NULL; | 270 | p->rw_sg_table = NULL; |
266 | gpu_sema_dbg(" %d: Failed to map semaphore pool!", p->page_idx); | 271 | gpu_sema_dbg(" %d: Failed to map semaphore pool!", p->page_idx); |
267 | return err; | 272 | return err; |
@@ -292,7 +297,7 @@ void nvgpu_semaphore_pool_unmap(struct nvgpu_semaphore_pool *p, | |||
292 | DMA_BIDIRECTIONAL); | 297 | DMA_BIDIRECTIONAL); |
293 | 298 | ||
294 | sg_free_table(p->rw_sg_table); | 299 | sg_free_table(p->rw_sg_table); |
295 | kfree(p->rw_sg_table); | 300 | nvgpu_kfree(p->sema_sea->gk20a, p->rw_sg_table); |
296 | p->rw_sg_table = NULL; | 301 | p->rw_sg_table = NULL; |
297 | 302 | ||
298 | list_for_each_entry(hw_sema, &p->hw_semas, hw_sema_list) | 303 | list_for_each_entry(hw_sema, &p->hw_semas, hw_sema_list) |
@@ -325,12 +330,12 @@ static void nvgpu_semaphore_pool_free(struct kref *ref) | |||
325 | __unlock_sema_sea(s); | 330 | __unlock_sema_sea(s); |
326 | 331 | ||
327 | list_for_each_entry_safe(hw_sema, tmp, &p->hw_semas, hw_sema_list) | 332 | list_for_each_entry_safe(hw_sema, tmp, &p->hw_semas, hw_sema_list) |
328 | kfree(hw_sema); | 333 | nvgpu_kfree(p->sema_sea->gk20a, hw_sema); |
329 | 334 | ||
330 | nvgpu_mutex_destroy(&p->pool_lock); | 335 | nvgpu_mutex_destroy(&p->pool_lock); |
331 | 336 | ||
332 | gpu_sema_dbg("Freed semaphore pool! (idx=%d)", p->page_idx); | 337 | gpu_sema_dbg("Freed semaphore pool! (idx=%d)", p->page_idx); |
333 | kfree(p); | 338 | nvgpu_kfree(p->sema_sea->gk20a, p); |
334 | } | 339 | } |
335 | 340 | ||
336 | void nvgpu_semaphore_pool_get(struct nvgpu_semaphore_pool *p) | 341 | void nvgpu_semaphore_pool_get(struct nvgpu_semaphore_pool *p) |
@@ -374,7 +379,7 @@ static int __nvgpu_init_hw_sema(struct channel_gk20a *ch) | |||
374 | goto fail; | 379 | goto fail; |
375 | } | 380 | } |
376 | 381 | ||
377 | hw_sema = kzalloc(sizeof(struct nvgpu_semaphore_int), GFP_KERNEL); | 382 | hw_sema = nvgpu_kzalloc(ch->g, sizeof(struct nvgpu_semaphore_int)); |
378 | if (!hw_sema) { | 383 | if (!hw_sema) { |
379 | ret = -ENOMEM; | 384 | ret = -ENOMEM; |
380 | goto fail_free_idx; | 385 | goto fail_free_idx; |
@@ -417,7 +422,7 @@ void nvgpu_semaphore_free_hw_sema(struct channel_gk20a *ch) | |||
417 | 422 | ||
418 | /* Make sure that when the ch is re-opened it will get a new HW sema. */ | 423 | /* Make sure that when the ch is re-opened it will get a new HW sema. */ |
419 | list_del(&ch->hw_sema->hw_sema_list); | 424 | list_del(&ch->hw_sema->hw_sema_list); |
420 | kfree(ch->hw_sema); | 425 | nvgpu_kfree(ch->g, ch->hw_sema); |
421 | ch->hw_sema = NULL; | 426 | ch->hw_sema = NULL; |
422 | 427 | ||
423 | nvgpu_mutex_release(&p->pool_lock); | 428 | nvgpu_mutex_release(&p->pool_lock); |
@@ -440,7 +445,7 @@ struct nvgpu_semaphore *nvgpu_semaphore_alloc(struct channel_gk20a *ch) | |||
440 | return NULL; | 445 | return NULL; |
441 | } | 446 | } |
442 | 447 | ||
443 | s = kzalloc(sizeof(*s), GFP_KERNEL); | 448 | s = nvgpu_kzalloc(ch->g, sizeof(*s)); |
444 | if (!s) | 449 | if (!s) |
445 | return NULL; | 450 | return NULL; |
446 | 451 | ||
@@ -466,7 +471,7 @@ static void nvgpu_semaphore_free(struct kref *ref) | |||
466 | 471 | ||
467 | nvgpu_semaphore_pool_put(s->hw_sema->p); | 472 | nvgpu_semaphore_pool_put(s->hw_sema->p); |
468 | 473 | ||
469 | kfree(s); | 474 | nvgpu_kfree(s->hw_sema->ch->g, s); |
470 | } | 475 | } |
471 | 476 | ||
472 | void nvgpu_semaphore_put(struct nvgpu_semaphore *s) | 477 | void nvgpu_semaphore_put(struct nvgpu_semaphore *s) |