summaryrefslogtreecommitdiffstats
path: root/drivers/gpu/nvgpu/gk20a
diff options
context:
space:
mode:
authorTerje Bergstrom <tbergstrom@nvidia.com>2014-10-07 08:02:35 -0400
committerDan Willemsen <dwillemsen@nvidia.com>2015-03-18 15:12:15 -0400
commit1d9fba8804fb811771eac0f68f334f51f101ed01 (patch)
tree0be143d6fd550db0e4aba15e2ae4d76117d37ad8 /drivers/gpu/nvgpu/gk20a
parentc0668f05ea1e2429444d6aad2a40dda81aba7ec8 (diff)
gpu: nvgpu: Per-alloc alignment
Change-Id: I8b7e86afb68adf6dd33b05995d0978f42d57e7b7
Signed-off-by: Terje Bergstrom <tbergstrom@nvidia.com>
Reviewed-on: http://git-master/r/554185
GVS: Gerrit_Virtual_Submit
Diffstat (limited to 'drivers/gpu/nvgpu/gk20a')
-rw-r--r--  drivers/gpu/nvgpu/gk20a/gk20a_allocator.c  | 22
-rw-r--r--  drivers/gpu/nvgpu/gk20a/gk20a_allocator.h  | 11
-rw-r--r--  drivers/gpu/nvgpu/gk20a/ltc_gk20a.c        |  3
-rw-r--r--  drivers/gpu/nvgpu/gk20a/mm_gk20a.c         | 22
-rw-r--r--  drivers/gpu/nvgpu/gk20a/pmu_gk20a.c        | 44
-rw-r--r--  drivers/gpu/nvgpu/gk20a/semaphore_gk20a.c  |  8
6 files changed, 56 insertions(+), 54 deletions(-)
diff --git a/drivers/gpu/nvgpu/gk20a/gk20a_allocator.c b/drivers/gpu/nvgpu/gk20a/gk20a_allocator.c
index 0b5f9f6f..fee3e4ea 100644
--- a/drivers/gpu/nvgpu/gk20a/gk20a_allocator.c
+++ b/drivers/gpu/nvgpu/gk20a/gk20a_allocator.c
@@ -20,7 +20,7 @@
20 20
21/* init allocator struct */ 21/* init allocator struct */
22int gk20a_allocator_init(struct gk20a_allocator *allocator, 22int gk20a_allocator_init(struct gk20a_allocator *allocator,
23 const char *name, u32 start, u32 len, u32 align) 23 const char *name, u32 start, u32 len)
24{ 24{
25 memset(allocator, 0, sizeof(struct gk20a_allocator)); 25 memset(allocator, 0, sizeof(struct gk20a_allocator));
26 26
@@ -28,16 +28,14 @@ int gk20a_allocator_init(struct gk20a_allocator *allocator,
28 28
29 allocator->base = start; 29 allocator->base = start;
30 allocator->limit = start + len - 1; 30 allocator->limit = start + len - 1;
31 allocator->align = align;
32 31
33 allocator->bitmap = kzalloc(BITS_TO_LONGS(len) * sizeof(long), 32 allocator->bitmap = kzalloc(BITS_TO_LONGS(len) * sizeof(long),
34 GFP_KERNEL); 33 GFP_KERNEL);
35 if (!allocator->bitmap) 34 if (!allocator->bitmap)
36 return -ENOMEM; 35 return -ENOMEM;
37 36
38 allocator_dbg(allocator, "%s : base %d, limit %d, align %d", 37 allocator_dbg(allocator, "%s : base %d, limit %d",
39 allocator->name, allocator->base, 38 allocator->name, allocator->base);
40 allocator->limit, allocator->align);
41 39
42 init_rwsem(&allocator->rw_sema); 40 init_rwsem(&allocator->rw_sema);
43 41
@@ -65,7 +63,7 @@ void gk20a_allocator_destroy(struct gk20a_allocator *allocator)
65 * contiguous address. 63 * contiguous address.
66*/ 64*/
67int gk20a_allocator_block_alloc(struct gk20a_allocator *allocator, 65int gk20a_allocator_block_alloc(struct gk20a_allocator *allocator,
68 u32 *addr, u32 len) 66 u32 *addr, u32 len, u32 align)
69{ 67{
70 unsigned long _addr; 68 unsigned long _addr;
71 69
@@ -73,11 +71,11 @@ int gk20a_allocator_block_alloc(struct gk20a_allocator *allocator,
73 71
74 if ((*addr != 0 && *addr < allocator->base) || /* check addr range */ 72 if ((*addr != 0 && *addr < allocator->base) || /* check addr range */
75 *addr + len > allocator->limit || /* check addr range */ 73 *addr + len > allocator->limit || /* check addr range */
76 *addr & (allocator->align - 1) || /* check addr alignment */ 74 *addr & (align - 1) || /* check addr alignment */
77 len == 0) /* check len */ 75 len == 0) /* check len */
78 return -EINVAL; 76 return -EINVAL;
79 77
80 len = ALIGN(len, allocator->align); 78 len = ALIGN(len, align);
81 if (!len) 79 if (!len)
82 return -ENOMEM; 80 return -ENOMEM;
83 81
@@ -87,7 +85,7 @@ int gk20a_allocator_block_alloc(struct gk20a_allocator *allocator,
87 allocator->limit - allocator->base + 1, 85 allocator->limit - allocator->base + 1,
88 *addr ? (*addr - allocator->base) : 0, 86 *addr ? (*addr - allocator->base) : 0,
89 len, 87 len,
90 allocator->align - 1); 88 align - 1);
91 if ((_addr > allocator->limit - allocator->base + 1) || 89 if ((_addr > allocator->limit - allocator->base + 1) ||
92 (*addr && *addr != (_addr + allocator->base))) { 90 (*addr && *addr != (_addr + allocator->base))) {
93 up_write(&allocator->rw_sema); 91 up_write(&allocator->rw_sema);
@@ -106,16 +104,16 @@ int gk20a_allocator_block_alloc(struct gk20a_allocator *allocator,
106 104
107/* free all blocks between start and end */ 105/* free all blocks between start and end */
108int gk20a_allocator_block_free(struct gk20a_allocator *allocator, 106int gk20a_allocator_block_free(struct gk20a_allocator *allocator,
109 u32 addr, u32 len) 107 u32 addr, u32 len, u32 align)
110{ 108{
111 allocator_dbg(allocator, "[in] addr %d, len %d", addr, len); 109 allocator_dbg(allocator, "[in] addr %d, len %d", addr, len);
112 110
113 if (addr + len > allocator->limit || /* check addr range */ 111 if (addr + len > allocator->limit || /* check addr range */
114 addr < allocator->base || 112 addr < allocator->base ||
115 addr & (allocator->align - 1)) /* check addr alignment */ 113 addr & (align - 1)) /* check addr alignment */
116 return -EINVAL; 114 return -EINVAL;
117 115
118 len = ALIGN(len, allocator->align); 116 len = ALIGN(len, align);
119 if (!len) 117 if (!len)
120 return -EINVAL; 118 return -EINVAL;
121 119
diff --git a/drivers/gpu/nvgpu/gk20a/gk20a_allocator.h b/drivers/gpu/nvgpu/gk20a/gk20a_allocator.h
index 154f953a..69a227bd 100644
--- a/drivers/gpu/nvgpu/gk20a/gk20a_allocator.h
+++ b/drivers/gpu/nvgpu/gk20a/gk20a_allocator.h
@@ -31,7 +31,6 @@ struct gk20a_allocator {
31 31
32 u32 base; /* min value of this linear space */ 32 u32 base; /* min value of this linear space */
33 u32 limit; /* max value = limit - 1 */ 33 u32 limit; /* max value = limit - 1 */
34 u32 align; /* alignment size, power of 2 */
35 34
36 unsigned long *bitmap; /* bitmap */ 35 unsigned long *bitmap; /* bitmap */
37 36
@@ -58,21 +57,21 @@ struct gk20a_allocator {
58 } constraint; 57 } constraint;
59 58
60 int (*alloc)(struct gk20a_allocator *allocator, 59 int (*alloc)(struct gk20a_allocator *allocator,
61 u32 *addr, u32 len); 60 u32 *addr, u32 len, u32 align);
62 int (*free)(struct gk20a_allocator *allocator, 61 int (*free)(struct gk20a_allocator *allocator,
63 u32 addr, u32 len); 62 u32 addr, u32 len, u32 align);
64 63
65}; 64};
66 65
67int gk20a_allocator_init(struct gk20a_allocator *allocator, 66int gk20a_allocator_init(struct gk20a_allocator *allocator,
68 const char *name, u32 base, u32 size, u32 align); 67 const char *name, u32 base, u32 size);
69void gk20a_allocator_destroy(struct gk20a_allocator *allocator); 68void gk20a_allocator_destroy(struct gk20a_allocator *allocator);
70 69
71int gk20a_allocator_block_alloc(struct gk20a_allocator *allocator, 70int gk20a_allocator_block_alloc(struct gk20a_allocator *allocator,
72 u32 *addr, u32 len); 71 u32 *addr, u32 len, u32 align);
73 72
74int gk20a_allocator_block_free(struct gk20a_allocator *allocator, 73int gk20a_allocator_block_free(struct gk20a_allocator *allocator,
75 u32 addr, u32 len); 74 u32 addr, u32 len, u32 align);
76 75
77#if defined(ALLOCATOR_DEBUG) 76#if defined(ALLOCATOR_DEBUG)
78 77
diff --git a/drivers/gpu/nvgpu/gk20a/ltc_gk20a.c b/drivers/gpu/nvgpu/gk20a/ltc_gk20a.c
index 71d87b5c..4c6543bb 100644
--- a/drivers/gpu/nvgpu/gk20a/ltc_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/ltc_gk20a.c
@@ -92,8 +92,7 @@ static int gk20a_ltc_init_comptags(struct gk20a *g, struct gr_gk20a *gr)
92 92
93 gk20a_allocator_init(&gr->comp_tags, "comptag", 93 gk20a_allocator_init(&gr->comp_tags, "comptag",
94 1, /* start */ 94 1, /* start */
95 max_comptag_lines - 1, /* length*/ 95 max_comptag_lines - 1); /* length*/
96 1); /* align */
97 96
98 gr->comptags_per_cacheline = comptags_per_cacheline; 97 gr->comptags_per_cacheline = comptags_per_cacheline;
99 gr->slices_per_ltc = slices_per_fbp / g->ltc_count; 98 gr->slices_per_ltc = slices_per_fbp / g->ltc_count;
diff --git a/drivers/gpu/nvgpu/gk20a/mm_gk20a.c b/drivers/gpu/nvgpu/gk20a/mm_gk20a.c
index e4186c95..f588d112 100644
--- a/drivers/gpu/nvgpu/gk20a/mm_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/mm_gk20a.c
@@ -129,7 +129,7 @@ static void gk20a_mm_delete_priv(void *_priv)
129 BUG_ON(!priv->comptag_allocator); 129 BUG_ON(!priv->comptag_allocator);
130 priv->comptag_allocator->free(priv->comptag_allocator, 130 priv->comptag_allocator->free(priv->comptag_allocator,
131 priv->comptags.offset, 131 priv->comptags.offset,
132 priv->comptags.lines); 132 priv->comptags.lines, 1);
133 } 133 }
134 134
135 /* Free buffer states */ 135 /* Free buffer states */
@@ -229,7 +229,7 @@ static int gk20a_alloc_comptags(struct device *dev,
229 229
230 /* store the allocator so we can use it when we free the ctags */ 230 /* store the allocator so we can use it when we free the ctags */
231 priv->comptag_allocator = allocator; 231 priv->comptag_allocator = allocator;
232 err = allocator->alloc(allocator, &offset, lines); 232 err = allocator->alloc(allocator, &offset, lines, 1);
233 if (!err) { 233 if (!err) {
234 priv->comptags.lines = lines; 234 priv->comptags.lines = lines;
235 priv->comptags.offset = offset; 235 priv->comptags.offset = offset;
@@ -837,7 +837,7 @@ u64 gk20a_vm_alloc_va(struct vm_gk20a *vm,
837 /* The vma allocator represents page accounting. */ 837 /* The vma allocator represents page accounting. */
838 num_pages = size >> ilog2(vm->gmmu_page_sizes[gmmu_pgsz_idx]); 838 num_pages = size >> ilog2(vm->gmmu_page_sizes[gmmu_pgsz_idx]);
839 839
840 err = vma->alloc(vma, &start_page_nr, num_pages); 840 err = vma->alloc(vma, &start_page_nr, num_pages, 1);
841 841
842 if (err) { 842 if (err) {
843 gk20a_err(dev_from_vm(vm), 843 gk20a_err(dev_from_vm(vm),
@@ -868,7 +868,7 @@ int gk20a_vm_free_va(struct vm_gk20a *vm,
868 start_page_nr = (u32)(offset >> page_shift); 868 start_page_nr = (u32)(offset >> page_shift);
869 num_pages = (u32)((size + page_size - 1) >> page_shift); 869 num_pages = (u32)((size + page_size - 1) >> page_shift);
870 870
871 err = vma->free(vma, start_page_nr, num_pages); 871 err = vma->free(vma, start_page_nr, num_pages, 1);
872 if (err) { 872 if (err) {
873 gk20a_err(dev_from_vm(vm), 873 gk20a_err(dev_from_vm(vm),
874 "not found: offset=0x%llx, sz=0x%llx", 874 "not found: offset=0x%llx, sz=0x%llx",
@@ -2290,9 +2290,8 @@ static int gk20a_init_vm(struct mm_gk20a *mm,
2290 vm->gmmu_page_sizes[gmmu_page_size_small]>>10); 2290 vm->gmmu_page_sizes[gmmu_page_size_small]>>10);
2291 err = gk20a_allocator_init(&vm->vma[gmmu_page_size_small], 2291 err = gk20a_allocator_init(&vm->vma[gmmu_page_size_small],
2292 alloc_name, 2292 alloc_name,
2293 low_hole_pages, /*start*/ 2293 low_hole_pages, /*start*/
2294 num_pages - low_hole_pages,/* length*/ 2294 num_pages - low_hole_pages);/* length*/
2295 1); /* align */
2296 if (err) 2295 if (err)
2297 goto clean_up_map_pde; 2296 goto clean_up_map_pde;
2298 2297
@@ -2305,8 +2304,7 @@ static int gk20a_init_vm(struct mm_gk20a *mm,
2305 err = gk20a_allocator_init(&vm->vma[gmmu_page_size_big], 2304 err = gk20a_allocator_init(&vm->vma[gmmu_page_size_big],
2306 alloc_name, 2305 alloc_name,
2307 num_pages, /* start */ 2306 num_pages, /* start */
2308 num_pages, /* length */ 2307 num_pages); /* length */
2309 1); /* align */
2310 if (err) 2308 if (err)
2311 goto clean_up_small_allocator; 2309 goto clean_up_small_allocator;
2312 } 2310 }
@@ -2435,7 +2433,7 @@ int gk20a_vm_alloc_space(struct gk20a_as_share *as_share,
2435 ilog2(vm->gmmu_page_sizes[pgsz_idx])); 2433 ilog2(vm->gmmu_page_sizes[pgsz_idx]));
2436 2434
2437 vma = &vm->vma[pgsz_idx]; 2435 vma = &vm->vma[pgsz_idx];
2438 err = vma->alloc(vma, &start_page_nr, args->pages); 2436 err = vma->alloc(vma, &start_page_nr, args->pages, 1);
2439 if (err) { 2437 if (err) {
2440 kfree(va_node); 2438 kfree(va_node);
2441 goto clean_up; 2439 goto clean_up;
@@ -2458,7 +2456,7 @@ int gk20a_vm_alloc_space(struct gk20a_as_share *as_share,
2458 pgsz_idx, true); 2456 pgsz_idx, true);
2459 if (err) { 2457 if (err) {
2460 mutex_unlock(&vm->update_gmmu_lock); 2458 mutex_unlock(&vm->update_gmmu_lock);
2461 vma->free(vma, start_page_nr, args->pages); 2459 vma->free(vma, start_page_nr, args->pages, 1);
2462 kfree(va_node); 2460 kfree(va_node);
2463 goto clean_up; 2461 goto clean_up;
2464 } 2462 }
@@ -2506,7 +2504,7 @@ int gk20a_vm_free_space(struct gk20a_as_share *as_share,
2506 ilog2(vm->gmmu_page_sizes[pgsz_idx])); 2504 ilog2(vm->gmmu_page_sizes[pgsz_idx]));
2507 2505
2508 vma = &vm->vma[pgsz_idx]; 2506 vma = &vm->vma[pgsz_idx];
2509 err = vma->free(vma, start_page_nr, args->pages); 2507 err = vma->free(vma, start_page_nr, args->pages, 1);
2510 2508
2511 if (err) 2509 if (err)
2512 goto clean_up; 2510 goto clean_up;
diff --git a/drivers/gpu/nvgpu/gk20a/pmu_gk20a.c b/drivers/gpu/nvgpu/gk20a/pmu_gk20a.c
index 47ee7a1b..5d973938 100644
--- a/drivers/gpu/nvgpu/gk20a/pmu_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/pmu_gk20a.c
@@ -2603,7 +2603,8 @@ static int pmu_init_perfmon(struct pmu_gk20a *pmu)
2603 2603
2604 if (!pmu->sample_buffer) 2604 if (!pmu->sample_buffer)
2605 err = pmu->dmem.alloc(&pmu->dmem, 2605 err = pmu->dmem.alloc(&pmu->dmem,
2606 &pmu->sample_buffer, 2 * sizeof(u16)); 2606 &pmu->sample_buffer, 2 * sizeof(u16),
2607 PMU_DMEM_ALLOC_ALIGNMENT);
2607 if (err) { 2608 if (err) {
2608 gk20a_err(dev_from_gk20a(g), 2609 gk20a_err(dev_from_gk20a(g),
2609 "failed to allocate perfmon sample buffer"); 2610 "failed to allocate perfmon sample buffer");
@@ -2707,8 +2708,7 @@ static int pmu_process_init_msg(struct pmu_gk20a *pmu,
2707 if (!pmu->dmem.alloc) 2708 if (!pmu->dmem.alloc)
2708 gk20a_allocator_init(&pmu->dmem, "gk20a_pmu_dmem", 2709 gk20a_allocator_init(&pmu->dmem, "gk20a_pmu_dmem",
2709 pv->get_pmu_init_msg_pmu_sw_mg_off(init), 2710 pv->get_pmu_init_msg_pmu_sw_mg_off(init),
2710 pv->get_pmu_init_msg_pmu_sw_mg_size(init), 2711 pv->get_pmu_init_msg_pmu_sw_mg_size(init));
2711 PMU_DMEM_ALLOC_ALIGNMENT);
2712 2712
2713 pmu->pmu_ready = true; 2713 pmu->pmu_ready = true;
2714 pmu->pmu_state = PMU_STATE_INIT_RECEIVED; 2714 pmu->pmu_state = PMU_STATE_INIT_RECEIVED;
@@ -2845,17 +2845,19 @@ static int pmu_response_handle(struct pmu_gk20a *pmu,
2845 if (pv->pmu_allocation_get_dmem_size(pmu, 2845 if (pv->pmu_allocation_get_dmem_size(pmu,
2846 pv->get_pmu_seq_in_a_ptr(seq)) != 0) 2846 pv->get_pmu_seq_in_a_ptr(seq)) != 0)
2847 pmu->dmem.free(&pmu->dmem, 2847 pmu->dmem.free(&pmu->dmem,
2848 pv->pmu_allocation_get_dmem_offset(pmu, 2848 pv->pmu_allocation_get_dmem_offset(pmu,
2849 pv->get_pmu_seq_in_a_ptr(seq)), 2849 pv->get_pmu_seq_in_a_ptr(seq)),
2850 pv->pmu_allocation_get_dmem_size(pmu, 2850 pv->pmu_allocation_get_dmem_size(pmu,
2851 pv->get_pmu_seq_in_a_ptr(seq))); 2851 pv->get_pmu_seq_in_a_ptr(seq)),
2852 PMU_DMEM_ALLOC_ALIGNMENT);
2852 if (pv->pmu_allocation_get_dmem_size(pmu, 2853 if (pv->pmu_allocation_get_dmem_size(pmu,
2853 pv->get_pmu_seq_out_a_ptr(seq)) != 0) 2854 pv->get_pmu_seq_out_a_ptr(seq)) != 0)
2854 pmu->dmem.free(&pmu->dmem, 2855 pmu->dmem.free(&pmu->dmem,
2855 pv->pmu_allocation_get_dmem_offset(pmu, 2856 pv->pmu_allocation_get_dmem_offset(pmu,
2856 pv->get_pmu_seq_out_a_ptr(seq)), 2857 pv->get_pmu_seq_out_a_ptr(seq)),
2857 pv->pmu_allocation_get_dmem_size(pmu, 2858 pv->pmu_allocation_get_dmem_size(pmu,
2858 pv->get_pmu_seq_out_a_ptr(seq))); 2859 pv->get_pmu_seq_out_a_ptr(seq)),
2860 PMU_DMEM_ALLOC_ALIGNMENT);
2859 2861
2860 if (seq->callback) 2862 if (seq->callback)
2861 seq->callback(g, msg, seq->cb_params, seq->desc, ret); 2863 seq->callback(g, msg, seq->cb_params, seq->desc, ret);
@@ -3493,8 +3495,9 @@ int gk20a_pmu_cmd_post(struct gk20a *g, struct pmu_cmd *cmd,
3493 (u16)max(payload->in.size, payload->out.size)); 3495 (u16)max(payload->in.size, payload->out.size));
3494 3496
3495 err = pmu->dmem.alloc(&pmu->dmem, 3497 err = pmu->dmem.alloc(&pmu->dmem,
3496 pv->pmu_allocation_get_dmem_offset_addr(pmu, in), 3498 pv->pmu_allocation_get_dmem_offset_addr(pmu, in),
3497 pv->pmu_allocation_get_dmem_size(pmu, in)); 3499 pv->pmu_allocation_get_dmem_size(pmu, in),
3500 PMU_DMEM_ALLOC_ALIGNMENT);
3498 if (err) 3501 if (err)
3499 goto clean_up; 3502 goto clean_up;
3500 3503
@@ -3517,8 +3520,9 @@ int gk20a_pmu_cmd_post(struct gk20a *g, struct pmu_cmd *cmd,
3517 3520
3518 if (payload->out.buf != payload->in.buf) { 3521 if (payload->out.buf != payload->in.buf) {
3519 err = pmu->dmem.alloc(&pmu->dmem, 3522 err = pmu->dmem.alloc(&pmu->dmem,
3520 pv->pmu_allocation_get_dmem_offset_addr(pmu, out), 3523 pv->pmu_allocation_get_dmem_offset_addr(pmu, out),
3521 pv->pmu_allocation_get_dmem_size(pmu, out)); 3524 pv->pmu_allocation_get_dmem_size(pmu, out),
3525 PMU_DMEM_ALLOC_ALIGNMENT);
3522 if (err) 3526 if (err)
3523 goto clean_up; 3527 goto clean_up;
3524 } else { 3528 } else {
@@ -3548,12 +3552,14 @@ clean_up:
3548 gk20a_dbg_fn("fail"); 3552 gk20a_dbg_fn("fail");
3549 if (in) 3553 if (in)
3550 pmu->dmem.free(&pmu->dmem, 3554 pmu->dmem.free(&pmu->dmem,
3551 pv->pmu_allocation_get_dmem_offset(pmu, in), 3555 pv->pmu_allocation_get_dmem_offset(pmu, in),
3552 pv->pmu_allocation_get_dmem_size(pmu, in)); 3556 pv->pmu_allocation_get_dmem_size(pmu, in),
3557 PMU_DMEM_ALLOC_ALIGNMENT);
3553 if (out) 3558 if (out)
3554 pmu->dmem.free(&pmu->dmem, 3559 pmu->dmem.free(&pmu->dmem,
3555 pv->pmu_allocation_get_dmem_offset(pmu, out), 3560 pv->pmu_allocation_get_dmem_offset(pmu, out),
3556 pv->pmu_allocation_get_dmem_size(pmu, out)); 3561 pv->pmu_allocation_get_dmem_size(pmu, out),
3562 PMU_DMEM_ALLOC_ALIGNMENT);
3557 3563
3558 pmu_seq_release(pmu, seq); 3564 pmu_seq_release(pmu, seq);
3559 return err; 3565 return err;
diff --git a/drivers/gpu/nvgpu/gk20a/semaphore_gk20a.c b/drivers/gpu/nvgpu/gk20a/semaphore_gk20a.c
index 493c7b63..04f61c58 100644
--- a/drivers/gpu/nvgpu/gk20a/semaphore_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/semaphore_gk20a.c
@@ -45,7 +45,7 @@ struct gk20a_semaphore_pool *gk20a_semaphore_pool_alloc(struct device *d,
45 goto clean_up; 45 goto clean_up;
46 46
47 if (gk20a_allocator_init(&p->alloc, unique_name, 0, 47 if (gk20a_allocator_init(&p->alloc, unique_name, 0,
48 p->size, SEMAPHORE_SIZE)) 48 p->size))
49 goto clean_up; 49 goto clean_up;
50 50
51 gk20a_dbg_info("cpuva=%p iova=%llx phys=%llx", p->cpu_va, 51 gk20a_dbg_info("cpuva=%p iova=%llx phys=%llx", p->cpu_va,
@@ -163,7 +163,8 @@ struct gk20a_semaphore *gk20a_semaphore_alloc(struct gk20a_semaphore_pool *pool)
163 if (!s) 163 if (!s)
164 return NULL; 164 return NULL;
165 165
166 if (pool->alloc.alloc(&pool->alloc, &s->offset, SEMAPHORE_SIZE)) { 166 if (pool->alloc.alloc(&pool->alloc, &s->offset, SEMAPHORE_SIZE,
167 SEMAPHORE_SIZE)) {
167 gk20a_err(pool->dev, "failed to allocate semaphore"); 168 gk20a_err(pool->dev, "failed to allocate semaphore");
168 kfree(s); 169 kfree(s);
169 return NULL; 170 return NULL;
@@ -185,7 +186,8 @@ static void gk20a_semaphore_free(struct kref *ref)
185 struct gk20a_semaphore *s = 186 struct gk20a_semaphore *s =
186 container_of(ref, struct gk20a_semaphore, ref); 187 container_of(ref, struct gk20a_semaphore, ref);
187 188
188 s->pool->alloc.free(&s->pool->alloc, s->offset, SEMAPHORE_SIZE); 189 s->pool->alloc.free(&s->pool->alloc, s->offset, SEMAPHORE_SIZE,
190 SEMAPHORE_SIZE);
189 gk20a_semaphore_pool_put(s->pool); 191 gk20a_semaphore_pool_put(s->pool);
190 kfree(s); 192 kfree(s);
191} 193}