summary | refs | log | tree | commit | diff | stats
path: root/drivers/gpu/nvgpu/common/semaphore.c
diff options
context:
space:
mode:
author: Srirangan <smadhavan@nvidia.com> 2018-08-20 05:13:41 -0400
committer: mobile promotions <svcmobile_promotions@nvidia.com> 2018-08-23 00:55:49 -0400
commit: 3fbaee7099039eee84343027dd1ce20679c0c113 (patch)
tree: 0de4934723f58cad9cdcdb642927ffce0cfac6d8 /drivers/gpu/nvgpu/common/semaphore.c
parent: 52305f0514d29e7fb2cb5e2154188e09faa3fe94 (diff)
gpu: nvgpu: common: Fix MISRA 15.6 violations
MISRA Rule 15.6 requires that all if-else blocks be enclosed in braces, including single-statement blocks. Fix errors due to single-statement if blocks without braces by introducing the braces.

JIRA NVGPU-671

Change-Id: I4d9933c51a297a725f48cbb15520a70494d74aeb
Signed-off-by: Srirangan <smadhavan@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1800833
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Diffstat (limited to 'drivers/gpu/nvgpu/common/semaphore.c')
-rw-r--r--  drivers/gpu/nvgpu/common/semaphore.c  45
1 file changed, 30 insertions, 15 deletions
diff --git a/drivers/gpu/nvgpu/common/semaphore.c b/drivers/gpu/nvgpu/common/semaphore.c
index 65aeb9eb..25bd3be3 100644
--- a/drivers/gpu/nvgpu/common/semaphore.c
+++ b/drivers/gpu/nvgpu/common/semaphore.c
@@ -66,8 +66,9 @@ static int __nvgpu_semaphore_sea_grow(struct nvgpu_semaphore_sea *sea)
66 ret = nvgpu_dma_alloc_sys(gk20a, 66 ret = nvgpu_dma_alloc_sys(gk20a,
67 PAGE_SIZE * SEMAPHORE_POOL_COUNT, 67 PAGE_SIZE * SEMAPHORE_POOL_COUNT,
68 &sea->sea_mem); 68 &sea->sea_mem);
69 if (ret) 69 if (ret) {
70 goto out; 70 goto out;
71 }
71 72
72 sea->size = SEMAPHORE_POOL_COUNT; 73 sea->size = SEMAPHORE_POOL_COUNT;
73 sea->map_size = SEMAPHORE_POOL_COUNT * PAGE_SIZE; 74 sea->map_size = SEMAPHORE_POOL_COUNT * PAGE_SIZE;
@@ -88,8 +89,9 @@ out:
88 89
89void nvgpu_semaphore_sea_destroy(struct gk20a *g) 90void nvgpu_semaphore_sea_destroy(struct gk20a *g)
90{ 91{
91 if (!g->sema_sea) 92 if (!g->sema_sea) {
92 return; 93 return;
94 }
93 95
94 nvgpu_dma_free(g, &g->sema_sea->sea_mem); 96 nvgpu_dma_free(g, &g->sema_sea->sea_mem);
95 nvgpu_mutex_destroy(&g->sema_sea->sea_lock); 97 nvgpu_mutex_destroy(&g->sema_sea->sea_lock);
@@ -103,22 +105,26 @@ void nvgpu_semaphore_sea_destroy(struct gk20a *g)
103 */ 105 */
104struct nvgpu_semaphore_sea *nvgpu_semaphore_sea_create(struct gk20a *g) 106struct nvgpu_semaphore_sea *nvgpu_semaphore_sea_create(struct gk20a *g)
105{ 107{
106 if (g->sema_sea) 108 if (g->sema_sea) {
107 return g->sema_sea; 109 return g->sema_sea;
110 }
108 111
109 g->sema_sea = nvgpu_kzalloc(g, sizeof(*g->sema_sea)); 112 g->sema_sea = nvgpu_kzalloc(g, sizeof(*g->sema_sea));
110 if (!g->sema_sea) 113 if (!g->sema_sea) {
111 return NULL; 114 return NULL;
115 }
112 116
113 g->sema_sea->size = 0; 117 g->sema_sea->size = 0;
114 g->sema_sea->page_count = 0; 118 g->sema_sea->page_count = 0;
115 g->sema_sea->gk20a = g; 119 g->sema_sea->gk20a = g;
116 nvgpu_init_list_node(&g->sema_sea->pool_list); 120 nvgpu_init_list_node(&g->sema_sea->pool_list);
117 if (nvgpu_mutex_init(&g->sema_sea->sea_lock)) 121 if (nvgpu_mutex_init(&g->sema_sea->sea_lock)) {
118 goto cleanup_free; 122 goto cleanup_free;
123 }
119 124
120 if (__nvgpu_semaphore_sea_grow(g->sema_sea)) 125 if (__nvgpu_semaphore_sea_grow(g->sema_sea)) {
121 goto cleanup_destroy; 126 goto cleanup_destroy;
127 }
122 128
123 gpu_sema_dbg(g, "Created semaphore sea!"); 129 gpu_sema_dbg(g, "Created semaphore sea!");
124 return g->sema_sea; 130 return g->sema_sea;
@@ -136,8 +142,9 @@ static int __semaphore_bitmap_alloc(unsigned long *bitmap, unsigned long len)
136{ 142{
137 unsigned long idx = find_first_zero_bit(bitmap, len); 143 unsigned long idx = find_first_zero_bit(bitmap, len);
138 144
139 if (idx == len) 145 if (idx == len) {
140 return -ENOSPC; 146 return -ENOSPC;
147 }
141 148
142 set_bit(idx, bitmap); 149 set_bit(idx, bitmap);
143 150
@@ -155,19 +162,22 @@ int nvgpu_semaphore_pool_alloc(struct nvgpu_semaphore_sea *sea,
155 int ret; 162 int ret;
156 163
157 p = nvgpu_kzalloc(sea->gk20a, sizeof(*p)); 164 p = nvgpu_kzalloc(sea->gk20a, sizeof(*p));
158 if (!p) 165 if (!p) {
159 return -ENOMEM; 166 return -ENOMEM;
167 }
160 168
161 __lock_sema_sea(sea); 169 __lock_sema_sea(sea);
162 170
163 ret = nvgpu_mutex_init(&p->pool_lock); 171 ret = nvgpu_mutex_init(&p->pool_lock);
164 if (ret) 172 if (ret) {
165 goto fail; 173 goto fail;
174 }
166 175
167 ret = __semaphore_bitmap_alloc(sea->pools_alloced, 176 ret = __semaphore_bitmap_alloc(sea->pools_alloced,
168 SEMAPHORE_POOL_COUNT); 177 SEMAPHORE_POOL_COUNT);
169 if (ret < 0) 178 if (ret < 0) {
170 goto fail_alloc; 179 goto fail_alloc;
180 }
171 181
172 page_idx = (unsigned long)ret; 182 page_idx = (unsigned long)ret;
173 183
@@ -205,8 +215,9 @@ int nvgpu_semaphore_pool_map(struct nvgpu_semaphore_pool *p,
205 int err = 0; 215 int err = 0;
206 u64 addr; 216 u64 addr;
207 217
208 if (p->mapped) 218 if (p->mapped) {
209 return -EBUSY; 219 return -EBUSY;
220 }
210 221
211 gpu_sema_dbg(pool_to_gk20a(p), 222 gpu_sema_dbg(pool_to_gk20a(p),
212 "Mapping semaphore pool! (idx=%d)", p->page_idx); 223 "Mapping semaphore pool! (idx=%d)", p->page_idx);
@@ -242,8 +253,9 @@ int nvgpu_semaphore_pool_map(struct nvgpu_semaphore_pool *p,
242 err = nvgpu_mem_create_from_mem(vm->mm->g, 253 err = nvgpu_mem_create_from_mem(vm->mm->g,
243 &p->rw_mem, &p->sema_sea->sea_mem, 254 &p->rw_mem, &p->sema_sea->sea_mem,
244 p->page_idx, 1); 255 p->page_idx, 1);
245 if (err) 256 if (err) {
246 goto fail_unmap; 257 goto fail_unmap;
258 }
247 259
248 addr = nvgpu_gmmu_map(vm, &p->rw_mem, SZ_4K, 0, 260 addr = nvgpu_gmmu_map(vm, &p->rw_mem, SZ_4K, 0,
249 gk20a_mem_flag_none, 0, 261 gk20a_mem_flag_none, 0,
@@ -342,8 +354,9 @@ void nvgpu_semaphore_pool_put(struct nvgpu_semaphore_pool *p)
342 */ 354 */
343u64 __nvgpu_semaphore_pool_gpu_va(struct nvgpu_semaphore_pool *p, bool global) 355u64 __nvgpu_semaphore_pool_gpu_va(struct nvgpu_semaphore_pool *p, bool global)
344{ 356{
345 if (!global) 357 if (!global) {
346 return p->gpu_va; 358 return p->gpu_va;
359 }
347 360
348 return p->gpu_va_ro + (PAGE_SIZE * p->page_idx); 361 return p->gpu_va_ro + (PAGE_SIZE * p->page_idx);
349} 362}
@@ -427,13 +440,15 @@ struct nvgpu_semaphore *nvgpu_semaphore_alloc(struct channel_gk20a *ch)
427 440
428 if (!ch->hw_sema) { 441 if (!ch->hw_sema) {
429 ret = __nvgpu_init_hw_sema(ch); 442 ret = __nvgpu_init_hw_sema(ch);
430 if (ret) 443 if (ret) {
431 return NULL; 444 return NULL;
445 }
432 } 446 }
433 447
434 s = nvgpu_kzalloc(ch->g, sizeof(*s)); 448 s = nvgpu_kzalloc(ch->g, sizeof(*s));
435 if (!s) 449 if (!s) {
436 return NULL; 450 return NULL;
451 }
437 452
438 nvgpu_ref_init(&s->ref); 453 nvgpu_ref_init(&s->ref);
439 s->g = ch->g; 454 s->g = ch->g;