path: root/drivers/gpu/nvgpu/gk20a/fence_gk20a.c
author		Srirangan <smadhavan@nvidia.com>	2018-08-31 03:50:52 -0400
committer	mobile promotions <svcmobile_promotions@nvidia.com>	2018-09-05 07:51:32 -0400
commit		43851d41b187c92f5ea9c2f503a882277f661d7e (patch)
tree		964a76c136c8c0dc14ec95358d27f930532b7dcb /drivers/gpu/nvgpu/gk20a/fence_gk20a.c
parent		0f97bd4d44c8bcedf298f725fe0b6cfc70fa81ff (diff)
gpu: nvgpu: gk20a: Fix MISRA 15.6 violations
MISRA Rule-15.6 requires that all if-else blocks be enclosed in braces,
including single statement blocks. Fix errors due to single statement
if blocks without braces by introducing the braces.

JIRA NVGPU-671

Change-Id: Iedac7d50aa2ebd409434eea5fda902b16d9c6fea
Signed-off-by: Srirangan <smadhavan@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1797695
Reviewed-by: svc-misra-checker <svc-misra-checker@nvidia.com>
GVS: Gerrit_Virtual_Submit
Reviewed-by: Vijayakumar Subbu <vsubbu@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
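For readers unfamiliar with the rule, the following is a minimal, hypothetical sketch of what MISRA C:2012 Rule 15.6 compliance looks like; the function name and values are illustrative assumptions, not code from this driver:

#include <stdio.h>

/*
 * Hypothetical illustration of MISRA C:2012 Rule 15.6: the body of every
 * if/else (and loop) must be a compound statement, so even single-statement
 * bodies are wrapped in braces.
 */
static int clamp_timeout(int timeout, int is_silicon)
{
	/* Non-compliant form: if (!is_silicon) timeout = 1000; */
	if (!is_silicon) {	/* compliant: single statement still braced */
		timeout = 1000;
	}

	if (timeout < 0) {
		timeout = 0;
	} else {
		timeout = timeout * 2;
	}

	return timeout;
}

int main(void)
{
	printf("%d\n", clamp_timeout(-5, 1));
	return 0;
}

The patch below applies the same transformation to each single-statement if/else body in fence_gk20a.c.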
Diffstat (limited to 'drivers/gpu/nvgpu/gk20a/fence_gk20a.c')
-rw-r--r--	drivers/gpu/nvgpu/gk20a/fence_gk20a.c	| 47
1 file changed, 31 insertions(+), 16 deletions(-)
diff --git a/drivers/gpu/nvgpu/gk20a/fence_gk20a.c b/drivers/gpu/nvgpu/gk20a/fence_gk20a.c
index 8f585afd..0df73278 100644
--- a/drivers/gpu/nvgpu/gk20a/fence_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/fence_gk20a.c
@@ -44,29 +44,35 @@ static void gk20a_fence_free(struct nvgpu_ref *ref)
 		container_of(ref, struct gk20a_fence, ref);
 	struct gk20a *g = f->g;
 
-	if (nvgpu_os_fence_is_initialized(&f->os_fence))
+	if (nvgpu_os_fence_is_initialized(&f->os_fence)) {
 		f->os_fence.ops->drop_ref(&f->os_fence);
+	}
 
-	if (f->semaphore)
+	if (f->semaphore) {
 		nvgpu_semaphore_put(f->semaphore);
+	}
 
 	if (f->allocator) {
-		if (nvgpu_alloc_initialized(f->allocator))
+		if (nvgpu_alloc_initialized(f->allocator)) {
 			nvgpu_free(f->allocator, (u64)(uintptr_t)f);
-	} else
+		}
+	} else {
 		nvgpu_kfree(g, f);
+	}
 }
 
 void gk20a_fence_put(struct gk20a_fence *f)
 {
-	if (f)
+	if (f) {
 		nvgpu_ref_put(&f->ref, gk20a_fence_free);
+	}
 }
 
 struct gk20a_fence *gk20a_fence_get(struct gk20a_fence *f)
 {
-	if (f)
+	if (f) {
 		nvgpu_ref_get(&f->ref);
+	}
 	return f;
 }
 
@@ -81,8 +87,9 @@ inline bool gk20a_fence_is_valid(struct gk20a_fence *f)
 int gk20a_fence_install_fd(struct gk20a_fence *f, int fd)
 {
 	if (!f || !gk20a_fence_is_valid(f) ||
-		!nvgpu_os_fence_is_initialized(&f->os_fence))
+		!nvgpu_os_fence_is_initialized(&f->os_fence)) {
 		return -EINVAL;
+	}
 
 	f->os_fence.ops->install_fence(&f->os_fence, fd);
 
@@ -93,8 +100,9 @@ int gk20a_fence_wait(struct gk20a *g, struct gk20a_fence *f,
 		unsigned long timeout)
 {
 	if (f && gk20a_fence_is_valid(f)) {
-		if (!nvgpu_platform_is_silicon(g))
+		if (!nvgpu_platform_is_silicon(g)) {
 			timeout = MAX_SCHEDULE_TIMEOUT;
+		}
 		return f->ops->wait(f, timeout);
 	}
 	return 0;
@@ -102,10 +110,11 @@ int gk20a_fence_wait(struct gk20a *g, struct gk20a_fence *f,
 
 bool gk20a_fence_is_expired(struct gk20a_fence *f)
 {
-	if (f && gk20a_fence_is_valid(f) && f->ops)
+	if (f && gk20a_fence_is_valid(f) && f->ops) {
 		return f->ops->is_expired(f);
-	else
+	} else {
 		return true;
+	}
 }
 
 int gk20a_alloc_fence_pool(struct channel_gk20a *c, unsigned int count)
@@ -120,14 +129,16 @@ int gk20a_alloc_fence_pool(struct channel_gk20a *c, unsigned int count)
 		fence_pool = nvgpu_vzalloc(c->g, size);
 	}
 
-	if (!fence_pool)
+	if (!fence_pool) {
 		return -ENOMEM;
+	}
 
 	err = nvgpu_lockless_allocator_init(c->g, &c->fence_allocator,
 				"fence_pool", (size_t)fence_pool, size,
 				sizeof(struct gk20a_fence), 0);
-	if (err)
+	if (err) {
 		goto fail;
+	}
 
 	return 0;
 
@@ -163,8 +174,9 @@ struct gk20a_fence *gk20a_alloc_fence(struct channel_gk20a *c)
 				fence->allocator = &c->fence_allocator;
 			}
 		}
-	} else
+	} else {
 		fence = nvgpu_kzalloc(c->g, sizeof(struct gk20a_fence));
+	}
 
 	if (fence) {
 		nvgpu_ref_init(&fence->ref);
@@ -178,8 +190,9 @@ void gk20a_init_fence(struct gk20a_fence *f,
 		const struct gk20a_fence_ops *ops,
 		struct nvgpu_os_fence os_fence)
 {
-	if (!f)
+	if (!f) {
 		return;
+	}
 	f->ops = ops;
 	f->syncpt_id = -1;
 	f->semaphore = NULL;
@@ -190,8 +203,9 @@ void gk20a_init_fence(struct gk20a_fence *f,
 
 static int nvgpu_semaphore_fence_wait(struct gk20a_fence *f, long timeout)
 {
-	if (!nvgpu_semaphore_is_acquired(f->semaphore))
+	if (!nvgpu_semaphore_is_acquired(f->semaphore)) {
 		return 0;
+	}
 
 	return NVGPU_COND_WAIT_INTERRUPTIBLE(
 		f->semaphore_wq,
@@ -219,8 +233,9 @@ int gk20a_fence_from_semaphore(
 	struct gk20a_fence *f = fence_out;
 
 	gk20a_init_fence(f, &nvgpu_semaphore_fence_ops, os_fence);
-	if (!f)
+	if (!f) {
 		return -EINVAL;
+	}
 
 
 	f->semaphore = semaphore;