author	Srirangan <smadhavan@nvidia.com>	2018-08-16 02:03:55 -0400
committer	mobile promotions <svcmobile_promotions@nvidia.com>	2018-08-20 08:46:25 -0400
commit	9e69e0cf978b53706f55ffb873e3966b4bb3a7a8 (patch)
tree	2437cda373f2c37419e14b89772fb3c5f6d234e4
parent	de10cedf8caca9fd01f1b85031e538843da23252 (diff)
gpu: nvgpu: common: Fix MISRA 15.6 violations
MISRA Rule-15.6 requires that all if-else blocks be enclosed in braces,
including single-statement blocks. Fix the violations caused by
single-statement if blocks without braces by introducing the braces.

JIRA NVGPU-671

Change-Id: I599cce2af1d6cdc24efefba4ec42abfe998aec47
Signed-off-by: Srirangan <smadhavan@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1795845
Reviewed-by: Adeel Raza <araza@nvidia.com>
Reviewed-by: svc-misra-checker <svc-misra-checker@nvidia.com>
Reviewed-by: Terje Bergstrom <tbergstrom@nvidia.com>
GVS: Gerrit_Virtual_Submit
Reviewed-by: Vijayakumar Subbu <vsubbu@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
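For illustration, a minimal before/after sketch of the pattern this change applies throughout (a hypothetical snippet, not taken from the nvgpu sources):

	/* Before: a single-statement if body without braces violates MISRA C:2012 Rule 15.6 */
	if (err)
		return err;

	/* After: the body is enclosed in braces, as Rule 15.6 requires */
	if (err) {
		return err;
	}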
-rw-r--r--	drivers/gpu/nvgpu/common/as.c	25
-rw-r--r--	drivers/gpu/nvgpu/common/bus/bus_gm20b.c	6
-rw-r--r--	drivers/gpu/nvgpu/common/bus/bus_gp10b.c	6
-rw-r--r--	drivers/gpu/nvgpu/common/enabled.c	10
-rw-r--r--	drivers/gpu/nvgpu/common/falcon/falcon.c	83
-rw-r--r--	drivers/gpu/nvgpu/common/fb/fb_gk20a.c	12
-rw-r--r--	drivers/gpu/nvgpu/common/fb/fb_gm20b.c	3
-rw-r--r--	drivers/gpu/nvgpu/common/fb/fb_gv100.c	3
-rw-r--r--	drivers/gpu/nvgpu/common/fb/fb_gv11b.c	162
9 files changed, 204 insertions(+), 106 deletions(-)
diff --git a/drivers/gpu/nvgpu/common/as.c b/drivers/gpu/nvgpu/common/as.c
index 75537ecc..f6fc3a06 100644
--- a/drivers/gpu/nvgpu/common/as.c
+++ b/drivers/gpu/nvgpu/common/as.c
@@ -64,11 +64,14 @@ static int gk20a_vm_alloc_share(struct gk20a_as_share *as_share,
 	if (big_page_size == 0) {
 		big_page_size = g->ops.mm.get_default_big_page_size();
 	} else {
-		if (!is_power_of_2(big_page_size))
+		if (!is_power_of_2(big_page_size)) {
 			return -EINVAL;
+		}
 
-		if (!(big_page_size & nvgpu_mm_get_available_big_page_sizes(g)))
+		if (!(big_page_size &
+			nvgpu_mm_get_available_big_page_sizes(g))) {
 			return -EINVAL;
+		}
 	}
 
 	snprintf(name, sizeof(name), "as_%d", as_share->id);
@@ -78,8 +81,9 @@ static int gk20a_vm_alloc_share(struct gk20a_as_share *as_share,
 			mm->channel.kernel_size,
 			mm->channel.user_size + mm->channel.kernel_size,
 			!mm->disable_bigpage, userspace_managed, name);
-	if (!vm)
+	if (!vm) {
 		return -ENOMEM;
+	}
 
 	as_share->vm = vm;
 	vm->as_share = as_share;
@@ -97,26 +101,30 @@ int gk20a_as_alloc_share(struct gk20a *g,
 
 	nvgpu_log_fn(g, " ");
 	g = gk20a_get(g);
-	if (!g)
+	if (!g) {
 		return -ENODEV;
+	}
 
 	*out = NULL;
 	as_share = nvgpu_kzalloc(g, sizeof(*as_share));
-	if (!as_share)
+	if (!as_share) {
 		return -ENOMEM;
+	}
 
 	as_share->as = &g->as;
 	as_share->id = generate_as_share_id(as_share->as);
 
 	/* this will set as_share->vm. */
 	err = gk20a_busy(g);
-	if (err)
+	if (err) {
 		goto failed;
+	}
 	err = gk20a_vm_alloc_share(as_share, big_page_size, flags);
 	gk20a_idle(g);
 
-	if (err)
+	if (err) {
 		goto failed;
+	}
 
 	*out = as_share;
 	return 0;
@@ -154,8 +162,9 @@ int gk20a_as_release_share(struct gk20a_as_share *as_share)
 
 	err = gk20a_busy(g);
 
-	if (err)
+	if (err) {
 		goto release_fail;
+	}
 
 	err = gk20a_vm_release_share(as_share);
 
diff --git a/drivers/gpu/nvgpu/common/bus/bus_gm20b.c b/drivers/gpu/nvgpu/common/bus/bus_gm20b.c
index d2c4c2b7..ef5fee8c 100644
--- a/drivers/gpu/nvgpu/common/bus/bus_gm20b.c
+++ b/drivers/gpu/nvgpu/common/bus/bus_gm20b.c
@@ -53,14 +53,16 @@ int gm20b_bus_bar1_bind(struct gk20a *g, struct nvgpu_mem *bar1_inst)
 		u32 val = gk20a_readl(g, bus_bind_status_r());
 		u32 pending = bus_bind_status_bar1_pending_v(val);
 		u32 outstanding = bus_bind_status_bar1_outstanding_v(val);
-		if (!pending && !outstanding)
+		if (!pending && !outstanding) {
 			break;
+		}
 
 		nvgpu_udelay(5);
 	} while (!nvgpu_timeout_expired(&timeout));
 
-	if (nvgpu_timeout_peek_expired(&timeout))
+	if (nvgpu_timeout_peek_expired(&timeout)) {
 		err = -EINVAL;
+	}
 
 	return err;
 }
diff --git a/drivers/gpu/nvgpu/common/bus/bus_gp10b.c b/drivers/gpu/nvgpu/common/bus/bus_gp10b.c
index 11e60c53..8aa75d8d 100644
--- a/drivers/gpu/nvgpu/common/bus/bus_gp10b.c
+++ b/drivers/gpu/nvgpu/common/bus/bus_gp10b.c
@@ -50,14 +50,16 @@ int gp10b_bus_bar2_bind(struct gk20a *g, struct nvgpu_mem *bar2_inst)
 		u32 val = gk20a_readl(g, bus_bind_status_r());
 		u32 pending = bus_bind_status_bar2_pending_v(val);
 		u32 outstanding = bus_bind_status_bar2_outstanding_v(val);
-		if (!pending && !outstanding)
+		if (!pending && !outstanding) {
 			break;
+		}
 
 		nvgpu_udelay(5);
 	} while (!nvgpu_timeout_expired(&timeout));
 
-	if (nvgpu_timeout_peek_expired(&timeout))
+	if (nvgpu_timeout_peek_expired(&timeout)) {
 		err = -EINVAL;
+	}
 
 	return err;
 }
diff --git a/drivers/gpu/nvgpu/common/enabled.c b/drivers/gpu/nvgpu/common/enabled.c
index cded36c8..00df9e3b 100644
--- a/drivers/gpu/nvgpu/common/enabled.c
+++ b/drivers/gpu/nvgpu/common/enabled.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2017-18, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -34,8 +34,9 @@ int nvgpu_init_enabled_flags(struct gk20a *g)
 	g->enabled_flags = nvgpu_kzalloc(g,
 			BITS_TO_LONGS(NVGPU_MAX_ENABLED_BITS) *
 			sizeof(unsigned long));
-	if (!g->enabled_flags)
+	if (!g->enabled_flags) {
 		return -ENOMEM;
+	}
 
 	return 0;
 }
@@ -55,8 +56,9 @@ bool nvgpu_is_enabled(struct gk20a *g, int flag)
 
 bool __nvgpu_set_enabled(struct gk20a *g, int flag, bool state)
 {
-	if (state)
+	if (state) {
 		return test_and_set_bit(flag, g->enabled_flags);
-	else
+	} else {
 		return test_and_clear_bit(flag, g->enabled_flags);
+	}
 }
diff --git a/drivers/gpu/nvgpu/common/falcon/falcon.c b/drivers/gpu/nvgpu/common/falcon/falcon.c
index 41dca0cd..81ba5e81 100644
--- a/drivers/gpu/nvgpu/common/falcon/falcon.c
+++ b/drivers/gpu/nvgpu/common/falcon/falcon.c
@@ -50,12 +50,14 @@ int nvgpu_flcn_wait_idle(struct nvgpu_falcon *flcn)
 	do {
 		idle_stat = flcn_ops->is_falcon_idle(flcn);
 
-		if (idle_stat)
+		if (idle_stat) {
 			break;
+		}
 
 		if (nvgpu_timeout_expired_msg(&timeout,
-			"waiting for falcon idle: 0x%08x", idle_stat))
+			"waiting for falcon idle: 0x%08x", idle_stat)) {
 			return -EBUSY;
+		}
 
 		nvgpu_usleep_range(100, 200);
 	} while (1);
@@ -74,13 +76,15 @@ int nvgpu_flcn_mem_scrub_wait(struct nvgpu_falcon *flcn)
 			MEM_SCRUBBING_TIMEOUT_DEFAULT,
 			NVGPU_TIMER_RETRY_TIMER);
 	do {
-		if (nvgpu_flcn_get_mem_scrubbing_status(flcn))
+		if (nvgpu_flcn_get_mem_scrubbing_status(flcn)) {
 			goto exit;
+		}
 		nvgpu_udelay(MEM_SCRUBBING_TIMEOUT_DEFAULT);
 	} while (!nvgpu_timeout_expired(&timeout));
 
-	if (nvgpu_timeout_peek_expired(&timeout))
+	if (nvgpu_timeout_peek_expired(&timeout)) {
 		status = -ETIMEDOUT;
+	}
 
 exit:
 	return status;
@@ -92,8 +96,9 @@ int nvgpu_flcn_reset(struct nvgpu_falcon *flcn)
 
 	if (flcn->flcn_ops.reset) {
 		status = flcn->flcn_ops.reset(flcn);
-		if (!status)
+		if (!status) {
 			status = nvgpu_flcn_mem_scrub_wait(flcn);
+		}
 	} else {
 		nvgpu_warn(flcn->g, "Invalid op on falcon 0x%x ",
 			flcn->flcn_id);
@@ -112,9 +117,10 @@ void nvgpu_flcn_set_irq(struct nvgpu_falcon *flcn, bool enable,
 		flcn->intr_mask = intr_mask;
 		flcn->intr_dest = intr_dest;
 		flcn_ops->set_irq(flcn, enable);
-	} else
+	} else {
 		nvgpu_warn(flcn->g, "Invalid op on falcon 0x%x ",
 			flcn->flcn_id);
+	}
 }
 
 bool nvgpu_flcn_get_mem_scrubbing_status(struct nvgpu_falcon *flcn)
@@ -122,11 +128,12 @@ bool nvgpu_flcn_get_mem_scrubbing_status(struct nvgpu_falcon *flcn)
 	struct nvgpu_falcon_ops *flcn_ops = &flcn->flcn_ops;
 	bool status = false;
 
-	if (flcn_ops->is_falcon_scrubbing_done)
+	if (flcn_ops->is_falcon_scrubbing_done) {
 		status = flcn_ops->is_falcon_scrubbing_done(flcn);
-	else
+	} else {
 		nvgpu_warn(flcn->g, "Invalid op on falcon 0x%x ",
 			flcn->flcn_id);
+	}
 
 	return status;
 }
@@ -136,11 +143,12 @@ bool nvgpu_flcn_get_cpu_halted_status(struct nvgpu_falcon *flcn)
 	struct nvgpu_falcon_ops *flcn_ops = &flcn->flcn_ops;
 	bool status = false;
 
-	if (flcn_ops->is_falcon_cpu_halted)
+	if (flcn_ops->is_falcon_cpu_halted) {
 		status = flcn_ops->is_falcon_cpu_halted(flcn);
-	else
+	} else {
 		nvgpu_warn(flcn->g, "Invalid op on falcon 0x%x ",
 			flcn->flcn_id);
+	}
 
 	return status;
 }
@@ -153,14 +161,16 @@ int nvgpu_flcn_wait_for_halt(struct nvgpu_falcon *flcn, unsigned int timeout)
 
 	nvgpu_timeout_init(g, &to, timeout, NVGPU_TIMER_CPU_TIMER);
 	do {
-		if (nvgpu_flcn_get_cpu_halted_status(flcn))
+		if (nvgpu_flcn_get_cpu_halted_status(flcn)) {
 			break;
+		}
 
 		nvgpu_udelay(10);
 	} while (!nvgpu_timeout_expired(&to));
 
-	if (nvgpu_timeout_peek_expired(&to))
+	if (nvgpu_timeout_peek_expired(&to)) {
 		status = -EBUSY;
+	}
 
 	return status;
 }
@@ -181,14 +191,16 @@ int nvgpu_flcn_clear_halt_intr_status(struct nvgpu_falcon *flcn,
 
 	nvgpu_timeout_init(g, &to, timeout, NVGPU_TIMER_CPU_TIMER);
 	do {
-		if (flcn_ops->clear_halt_interrupt_status(flcn))
+		if (flcn_ops->clear_halt_interrupt_status(flcn)) {
 			break;
+		}
 
 		nvgpu_udelay(1);
 	} while (!nvgpu_timeout_expired(&to));
 
-	if (nvgpu_timeout_peek_expired(&to))
+	if (nvgpu_timeout_peek_expired(&to)) {
 		status = -EBUSY;
+	}
 
 	return status;
 }
@@ -198,11 +210,12 @@ bool nvgpu_flcn_get_idle_status(struct nvgpu_falcon *flcn)
 	struct nvgpu_falcon_ops *flcn_ops = &flcn->flcn_ops;
 	bool status = false;
 
-	if (flcn_ops->is_falcon_idle)
+	if (flcn_ops->is_falcon_idle) {
 		status = flcn_ops->is_falcon_idle(flcn);
-	else
+	} else {
 		nvgpu_warn(flcn->g, "Invalid op on falcon 0x%x ",
 			flcn->flcn_id);
+	}
 
 	return status;
 }
@@ -229,11 +242,12 @@ int nvgpu_flcn_copy_from_imem(struct nvgpu_falcon *flcn,
 	struct nvgpu_falcon_ops *flcn_ops = &flcn->flcn_ops;
 	int status = -EINVAL;
 
-	if (flcn_ops->copy_from_imem)
+	if (flcn_ops->copy_from_imem) {
 		status = flcn_ops->copy_from_imem(flcn, src, dst, size, port);
-	else
+	} else {
 		nvgpu_warn(flcn->g, "Invalid op on falcon 0x%x ",
 			flcn->flcn_id);
+	}
 
 	return status;
 }
@@ -244,12 +258,13 @@ int nvgpu_flcn_copy_to_imem(struct nvgpu_falcon *flcn,
 	struct nvgpu_falcon_ops *flcn_ops = &flcn->flcn_ops;
 	int status = -EINVAL;
 
-	if (flcn_ops->copy_to_imem)
+	if (flcn_ops->copy_to_imem) {
 		status = flcn_ops->copy_to_imem(flcn, dst, src, size, port,
 			sec, tag);
-	else
+	} else {
 		nvgpu_warn(flcn->g, "Invalid op on falcon 0x%x ",
 			flcn->flcn_id);
+	}
 
 	return status;
 }
@@ -269,15 +284,17 @@ static void nvgpu_flcn_print_mem(struct nvgpu_falcon *flcn, u32 src,
 	do {
 		byte_read_count = total_block_read ? sizeof(buff) : size;
 
-		if (!byte_read_count)
+		if (!byte_read_count) {
 			break;
+		}
 
-		if (mem_type == MEM_DMEM)
+		if (mem_type == MEM_DMEM) {
 			status = nvgpu_flcn_copy_from_dmem(flcn, src,
 				(u8 *)buff, byte_read_count, 0);
-		else
+		} else {
 			status = nvgpu_flcn_copy_from_imem(flcn, src,
 				(u8 *)buff, byte_read_count, 0);
+		}
 
 		if (status) {
 			nvgpu_err(flcn->g, "MEM print failed");
@@ -312,11 +329,12 @@ int nvgpu_flcn_bootstrap(struct nvgpu_falcon *flcn, u32 boot_vector)
 	struct nvgpu_falcon_ops *flcn_ops = &flcn->flcn_ops;
 	int status = -EINVAL;
 
-	if (flcn_ops->bootstrap)
+	if (flcn_ops->bootstrap) {
 		status = flcn_ops->bootstrap(flcn, boot_vector);
-	else
+	} else {
 		nvgpu_warn(flcn->g, "Invalid op on falcon 0x%x ",
 			flcn->flcn_id);
+	}
 
 	return status;
 }
@@ -326,11 +344,12 @@ u32 nvgpu_flcn_mailbox_read(struct nvgpu_falcon *flcn, u32 mailbox_index)
 	struct nvgpu_falcon_ops *flcn_ops = &flcn->flcn_ops;
 	u32 data = 0;
 
-	if (flcn_ops->mailbox_read)
+	if (flcn_ops->mailbox_read) {
 		data = flcn_ops->mailbox_read(flcn, mailbox_index);
-	else
+	} else {
 		nvgpu_warn(flcn->g, "Invalid op on falcon 0x%x ",
 			flcn->flcn_id);
+	}
 
 	return data;
 }
@@ -340,22 +359,24 @@ void nvgpu_flcn_mailbox_write(struct nvgpu_falcon *flcn, u32 mailbox_index,
 {
 	struct nvgpu_falcon_ops *flcn_ops = &flcn->flcn_ops;
 
-	if (flcn_ops->mailbox_write)
+	if (flcn_ops->mailbox_write) {
 		flcn_ops->mailbox_write(flcn, mailbox_index, data);
-	else
+	} else {
 		nvgpu_warn(flcn->g, "Invalid op on falcon 0x%x ",
 			flcn->flcn_id);
+	}
 }
 
 void nvgpu_flcn_dump_stats(struct nvgpu_falcon *flcn)
 {
 	struct nvgpu_falcon_ops *flcn_ops = &flcn->flcn_ops;
 
-	if (flcn_ops->dump_falcon_stats)
+	if (flcn_ops->dump_falcon_stats) {
 		flcn_ops->dump_falcon_stats(flcn);
-	else
+	} else {
 		nvgpu_warn(flcn->g, "Invalid op on falcon 0x%x ",
 			flcn->flcn_id);
+	}
 }
 
 int nvgpu_flcn_bl_bootstrap(struct nvgpu_falcon *flcn,
diff --git a/drivers/gpu/nvgpu/common/fb/fb_gk20a.c b/drivers/gpu/nvgpu/common/fb/fb_gk20a.c
index 903bb983..d7e1a8a7 100644
--- a/drivers/gpu/nvgpu/common/fb/fb_gk20a.c
+++ b/drivers/gpu/nvgpu/common/fb/fb_gk20a.c
@@ -74,8 +74,9 @@ void gk20a_fb_tlb_invalidate(struct gk20a *g, struct nvgpu_mem *pdb)
 	   hw. Use the power_on flag to skip tlb invalidation when gpu
 	   power is turned off */
 
-	if (!g->power_on)
+	if (!g->power_on) {
 		return;
+	}
 
 	addr_lo = u64_lo32(nvgpu_mem_get_addr(g, pdb) >> 12);
 
@@ -87,14 +88,16 @@ void gk20a_fb_tlb_invalidate(struct gk20a *g, struct nvgpu_mem *pdb)
 
 	do {
 		data = gk20a_readl(g, fb_mmu_ctrl_r());
-		if (fb_mmu_ctrl_pri_fifo_space_v(data) != 0)
+		if (fb_mmu_ctrl_pri_fifo_space_v(data) != 0) {
 			break;
+		}
 		nvgpu_udelay(2);
 	} while (!nvgpu_timeout_expired_msg(&timeout,
 			"wait mmu fifo space"));
 
-	if (nvgpu_timeout_peek_expired(&timeout))
+	if (nvgpu_timeout_peek_expired(&timeout)) {
 		goto out;
+	}
 
 	nvgpu_timeout_init(g, &timeout, 1000, NVGPU_TIMER_RETRY_TIMER);
 
@@ -112,8 +115,9 @@ void gk20a_fb_tlb_invalidate(struct gk20a *g, struct nvgpu_mem *pdb)
 	do {
 		data = gk20a_readl(g, fb_mmu_ctrl_r());
 		if (fb_mmu_ctrl_pri_fifo_empty_v(data) !=
-			fb_mmu_ctrl_pri_fifo_empty_false_f())
+			fb_mmu_ctrl_pri_fifo_empty_false_f()) {
 			break;
+		}
 		nvgpu_udelay(2);
 	} while (!nvgpu_timeout_expired_msg(&timeout,
 			"wait mmu invalidate"));
diff --git a/drivers/gpu/nvgpu/common/fb/fb_gm20b.c b/drivers/gpu/nvgpu/common/fb/fb_gm20b.c
index 9f207771..511e13be 100644
--- a/drivers/gpu/nvgpu/common/fb/fb_gm20b.c
+++ b/drivers/gpu/nvgpu/common/fb/fb_gm20b.c
@@ -148,8 +148,9 @@ static int gm20b_fb_vpr_info_fetch_wait(struct gk20a *g,
 
 		val = gk20a_readl(g, fb_mmu_vpr_info_r());
 		if (fb_mmu_vpr_info_fetch_v(val) ==
-			fb_mmu_vpr_info_fetch_false_v())
+			fb_mmu_vpr_info_fetch_false_v()) {
 			return 0;
+		}
 
 	} while (!nvgpu_timeout_expired(&timeout));
 
diff --git a/drivers/gpu/nvgpu/common/fb/fb_gv100.c b/drivers/gpu/nvgpu/common/fb/fb_gv100.c
index e68062c0..e1c95045 100644
--- a/drivers/gpu/nvgpu/common/fb/fb_gv100.c
+++ b/drivers/gpu/nvgpu/common/fb/fb_gv100.c
@@ -200,8 +200,9 @@ int gv100_fb_memory_unlock(struct gk20a *g)
 			falcon_falcon_sctl_r()));
 
 exit:
-	if (mem_unlock_fw)
+	if (mem_unlock_fw) {
 		nvgpu_release_firmware(g, mem_unlock_fw);
+	}
 
 	nvgpu_log_fn(g, "done, status - %d", err);
 
diff --git a/drivers/gpu/nvgpu/common/fb/fb_gv11b.c b/drivers/gpu/nvgpu/common/fb/fb_gv11b.c
index b6121f4d..d5ad495a 100644
--- a/drivers/gpu/nvgpu/common/fb/fb_gv11b.c
+++ b/drivers/gpu/nvgpu/common/fb/fb_gv11b.c
@@ -97,12 +97,13 @@ void gv11b_fb_init_cbc(struct gk20a *g, struct gr_gk20a *gr)
 	u64 compbit_store_iova;
 	u64 compbit_base_post_divide64;
 
-	if (nvgpu_is_enabled(g, NVGPU_IS_FMODEL))
+	if (nvgpu_is_enabled(g, NVGPU_IS_FMODEL)) {
 		compbit_store_iova = nvgpu_mem_get_phys_addr(g,
 				&gr->compbit_store.mem);
-	else
+	} else {
 		compbit_store_iova = nvgpu_mem_get_addr(g,
 				&gr->compbit_store.mem);
+	}
 	/* must be aligned to 64 KB */
 	compbit_store_iova = roundup(compbit_store_iova, (u64)SZ_64K);
 
@@ -115,12 +116,14 @@ void gv11b_fb_init_cbc(struct gk20a *g, struct gr_gk20a *gr)
 	compbit_base_post_multiply64 = ((u64)compbit_base_post_divide *
 		g->ltc_count) << fb_mmu_cbc_base_address_alignment_shift_v();
 
-	if (compbit_base_post_multiply64 < compbit_store_iova)
+	if (compbit_base_post_multiply64 < compbit_store_iova) {
 		compbit_base_post_divide++;
+	}
 
-	if (g->ops.ltc.cbc_fix_config)
+	if (g->ops.ltc.cbc_fix_config) {
 		compbit_base_post_divide =
 			g->ops.ltc.cbc_fix_config(g, compbit_base_post_divide);
+	}
 
 	gk20a_writel(g, fb_mmu_cbc_base_r(),
 		fb_mmu_cbc_base_address_f(compbit_base_post_divide));
@@ -250,8 +253,9 @@ static void gv11b_fb_fault_buffer_get_ptr_update(struct gk20a *g,
 	/* while the fault is being handled it is possible for overflow
 	 * to happen,
 	 */
-	if (reg_val & fb_mmu_fault_buffer_get_overflow_m())
+	if (reg_val & fb_mmu_fault_buffer_get_overflow_m()) {
 		reg_val |= fb_mmu_fault_buffer_get_overflow_clear_f();
+	}
 
 	g->ops.fb.write_mmu_fault_buffer_get(g, index, reg_val);
 
@@ -341,8 +345,10 @@ void gv11b_fb_fault_buf_set_state_hw(struct gk20a *g,
 	fault_status = g->ops.fb.read_mmu_fault_status(g);
 
 	do {
-		if (!(fault_status & fb_mmu_fault_status_busy_true_f()))
+		if (!(fault_status &
+			fb_mmu_fault_status_busy_true_f())) {
 			break;
+		}
 		/*
 		 * Make sure fault buffer is disabled.
 		 * This is to avoid accessing fault buffer by hw
@@ -435,19 +441,23 @@ void gv11b_handle_l2tlb_ecc_isr(struct gk20a *g, u32 ecc_status)
 		fb_mmu_l2tlb_ecc_status_uncorrected_err_total_counter_overflow_m();
 
 	/* clear the interrupt */
-	if ((corrected_delta > 0) || corrected_overflow)
+	if ((corrected_delta > 0) || corrected_overflow) {
 		gk20a_writel(g, fb_mmu_l2tlb_ecc_corrected_err_count_r(), 0);
-	if ((uncorrected_delta > 0) || uncorrected_overflow)
+	}
+	if ((uncorrected_delta > 0) || uncorrected_overflow) {
 		gk20a_writel(g, fb_mmu_l2tlb_ecc_uncorrected_err_count_r(), 0);
+	}
 
 	gk20a_writel(g, fb_mmu_l2tlb_ecc_status_r(),
 		fb_mmu_l2tlb_ecc_status_reset_clear_f());
 
 	/* Handle overflow */
-	if (corrected_overflow)
+	if (corrected_overflow) {
 		corrected_delta += (0x1UL << fb_mmu_l2tlb_ecc_corrected_err_count_total_s());
-	if (uncorrected_overflow)
+	}
+	if (uncorrected_overflow) {
 		uncorrected_delta += (0x1UL << fb_mmu_l2tlb_ecc_uncorrected_err_count_total_s());
+	}
 
 
 	g->ecc.fb.mmu_l2tlb_ecc_corrected_err_count[0].counter +=
@@ -455,12 +465,17 @@ void gv11b_handle_l2tlb_ecc_isr(struct gk20a *g, u32 ecc_status)
 	g->ecc.fb.mmu_l2tlb_ecc_uncorrected_err_count[0].counter +=
 		uncorrected_delta;
 
-	if (ecc_status & fb_mmu_l2tlb_ecc_status_corrected_err_l2tlb_sa_data_m())
+	if (ecc_status &
+		fb_mmu_l2tlb_ecc_status_corrected_err_l2tlb_sa_data_m()) {
 		nvgpu_log(g, gpu_dbg_intr, "corrected ecc sa data error");
-	if (ecc_status & fb_mmu_l2tlb_ecc_status_uncorrected_err_l2tlb_sa_data_m())
+	}
+	if (ecc_status &
+		fb_mmu_l2tlb_ecc_status_uncorrected_err_l2tlb_sa_data_m()) {
 		nvgpu_log(g, gpu_dbg_intr, "uncorrected ecc sa data error");
-	if (corrected_overflow || uncorrected_overflow)
+	}
+	if (corrected_overflow || uncorrected_overflow) {
 		nvgpu_info(g, "mmu l2tlb ecc counter overflow!");
+	}
 
 	nvgpu_log(g, gpu_dbg_intr,
 		"ecc error address: 0x%x", ecc_addr);
@@ -493,19 +508,23 @@ void gv11b_handle_hubtlb_ecc_isr(struct gk20a *g, u32 ecc_status)
 		fb_mmu_hubtlb_ecc_status_uncorrected_err_total_counter_overflow_m();
 
 	/* clear the interrupt */
-	if ((corrected_delta > 0) || corrected_overflow)
+	if ((corrected_delta > 0) || corrected_overflow) {
 		gk20a_writel(g, fb_mmu_hubtlb_ecc_corrected_err_count_r(), 0);
-	if ((uncorrected_delta > 0) || uncorrected_overflow)
+	}
+	if ((uncorrected_delta > 0) || uncorrected_overflow) {
 		gk20a_writel(g, fb_mmu_hubtlb_ecc_uncorrected_err_count_r(), 0);
+	}
 
 	gk20a_writel(g, fb_mmu_hubtlb_ecc_status_r(),
 		fb_mmu_hubtlb_ecc_status_reset_clear_f());
 
 	/* Handle overflow */
-	if (corrected_overflow)
+	if (corrected_overflow) {
 		corrected_delta += (0x1UL << fb_mmu_hubtlb_ecc_corrected_err_count_total_s());
-	if (uncorrected_overflow)
+	}
+	if (uncorrected_overflow) {
 		uncorrected_delta += (0x1UL << fb_mmu_hubtlb_ecc_uncorrected_err_count_total_s());
+	}
 
 
 	g->ecc.fb.mmu_hubtlb_ecc_corrected_err_count[0].counter +=
@@ -513,12 +532,15 @@ void gv11b_handle_hubtlb_ecc_isr(struct gk20a *g, u32 ecc_status)
 	g->ecc.fb.mmu_hubtlb_ecc_uncorrected_err_count[0].counter +=
 		uncorrected_delta;
 
-	if (ecc_status & fb_mmu_hubtlb_ecc_status_corrected_err_sa_data_m())
+	if (ecc_status & fb_mmu_hubtlb_ecc_status_corrected_err_sa_data_m()) {
 		nvgpu_log(g, gpu_dbg_intr, "corrected ecc sa data error");
-	if (ecc_status & fb_mmu_hubtlb_ecc_status_uncorrected_err_sa_data_m())
+	}
+	if (ecc_status & fb_mmu_hubtlb_ecc_status_uncorrected_err_sa_data_m()) {
 		nvgpu_log(g, gpu_dbg_intr, "uncorrected ecc sa data error");
-	if (corrected_overflow || uncorrected_overflow)
+	}
+	if (corrected_overflow || uncorrected_overflow) {
 		nvgpu_info(g, "mmu hubtlb ecc counter overflow!");
+	}
 
 	nvgpu_log(g, gpu_dbg_intr,
 		"ecc error address: 0x%x", ecc_addr);
@@ -551,19 +573,23 @@ void gv11b_handle_fillunit_ecc_isr(struct gk20a *g, u32 ecc_status)
 		fb_mmu_fillunit_ecc_status_uncorrected_err_total_counter_overflow_m();
 
 	/* clear the interrupt */
-	if ((corrected_delta > 0) || corrected_overflow)
+	if ((corrected_delta > 0) || corrected_overflow) {
 		gk20a_writel(g, fb_mmu_fillunit_ecc_corrected_err_count_r(), 0);
-	if ((uncorrected_delta > 0) || uncorrected_overflow)
+	}
+	if ((uncorrected_delta > 0) || uncorrected_overflow) {
 		gk20a_writel(g, fb_mmu_fillunit_ecc_uncorrected_err_count_r(), 0);
+	}
 
 	gk20a_writel(g, fb_mmu_fillunit_ecc_status_r(),
 		fb_mmu_fillunit_ecc_status_reset_clear_f());
 
 	/* Handle overflow */
-	if (corrected_overflow)
+	if (corrected_overflow) {
 		corrected_delta += (0x1UL << fb_mmu_fillunit_ecc_corrected_err_count_total_s());
-	if (uncorrected_overflow)
+	}
+	if (uncorrected_overflow) {
 		uncorrected_delta += (0x1UL << fb_mmu_fillunit_ecc_uncorrected_err_count_total_s());
+	}
 
 
 	g->ecc.fb.mmu_fillunit_ecc_corrected_err_count[0].counter +=
@@ -571,17 +597,26 @@ void gv11b_handle_fillunit_ecc_isr(struct gk20a *g, u32 ecc_status)
 	g->ecc.fb.mmu_fillunit_ecc_uncorrected_err_count[0].counter +=
 		uncorrected_delta;
 
-	if (ecc_status & fb_mmu_fillunit_ecc_status_corrected_err_pte_data_m())
+	if (ecc_status &
+		fb_mmu_fillunit_ecc_status_corrected_err_pte_data_m()) {
 		nvgpu_log(g, gpu_dbg_intr, "corrected ecc pte data error");
-	if (ecc_status & fb_mmu_fillunit_ecc_status_uncorrected_err_pte_data_m())
+	}
+	if (ecc_status &
+		fb_mmu_fillunit_ecc_status_uncorrected_err_pte_data_m()) {
 		nvgpu_log(g, gpu_dbg_intr, "uncorrected ecc pte data error");
-	if (ecc_status & fb_mmu_fillunit_ecc_status_corrected_err_pde0_data_m())
+	}
+	if (ecc_status &
+		fb_mmu_fillunit_ecc_status_corrected_err_pde0_data_m()) {
 		nvgpu_log(g, gpu_dbg_intr, "corrected ecc pde0 data error");
-	if (ecc_status & fb_mmu_fillunit_ecc_status_uncorrected_err_pde0_data_m())
+	}
+	if (ecc_status &
+		fb_mmu_fillunit_ecc_status_uncorrected_err_pde0_data_m()) {
 		nvgpu_log(g, gpu_dbg_intr, "uncorrected ecc pde0 data error");
+	}
 
-	if (corrected_overflow || uncorrected_overflow)
+	if (corrected_overflow || uncorrected_overflow) {
 		nvgpu_info(g, "mmu fillunit ecc counter overflow!");
+	}
 
 	nvgpu_log(g, gpu_dbg_intr,
 		"ecc error address: 0x%x", ecc_addr);
@@ -594,33 +629,37 @@ void gv11b_handle_fillunit_ecc_isr(struct gk20a *g, u32 ecc_status)
 static void gv11b_fb_parse_mmfault(struct mmu_fault_info *mmfault)
 {
 	if (WARN_ON(mmfault->fault_type >=
-		ARRAY_SIZE(fault_type_descs_gv11b)))
+		ARRAY_SIZE(fault_type_descs_gv11b))) {
 		mmfault->fault_type_desc = invalid_str;
-	else
+	} else {
 		mmfault->fault_type_desc =
 			fault_type_descs_gv11b[mmfault->fault_type];
+	}
 
 	if (WARN_ON(mmfault->client_type >=
-		ARRAY_SIZE(fault_client_type_descs_gv11b)))
+		ARRAY_SIZE(fault_client_type_descs_gv11b))) {
 		mmfault->client_type_desc = invalid_str;
-	else
+	} else {
 		mmfault->client_type_desc =
 			fault_client_type_descs_gv11b[mmfault->client_type];
+	}
 
 	mmfault->client_id_desc = invalid_str;
 	if (mmfault->client_type ==
 			gmmu_fault_client_type_hub_v()) {
 
 		if (!(WARN_ON(mmfault->client_id >=
-			ARRAY_SIZE(hub_client_descs_gv11b))))
+			ARRAY_SIZE(hub_client_descs_gv11b)))) {
 			mmfault->client_id_desc =
 				hub_client_descs_gv11b[mmfault->client_id];
+		}
 	} else if (mmfault->client_type ==
 			gmmu_fault_client_type_gpc_v()) {
 		if (!(WARN_ON(mmfault->client_id >=
-			ARRAY_SIZE(gpc_client_descs_gv11b))))
+			ARRAY_SIZE(gpc_client_descs_gv11b)))) {
 			mmfault->client_id_desc =
 				gpc_client_descs_gv11b[mmfault->client_id];
+		}
 	}
 
 }
@@ -719,8 +758,9 @@ static void gv11b_fb_copy_from_hw_fault_buf(struct gk20a *g,
 
 	/* refch will be put back after fault is handled */
 	refch = gk20a_refch_from_inst_ptr(g, inst_ptr);
-	if (refch)
+	if (refch) {
 		chid = refch->chid;
+	}
 
 	/* it is ok to continue even if refch is NULL */
 	mmfault->refch = refch;
@@ -803,8 +843,9 @@ static void gv11b_fb_handle_mmu_fault_common(struct gk20a *g,
 	u32 id = FIFO_INVAL_TSG_ID;
 	unsigned int rc_type = RC_TYPE_NO_RC;
 
-	if (!mmfault->valid)
+	if (!mmfault->valid) {
 		return;
+	}
 
 	gv11b_fb_print_fault_info(g, mmfault);
 
@@ -877,8 +918,9 @@ static void gv11b_fb_handle_mmu_fault_common(struct gk20a *g,
 		rc_type = RC_TYPE_MMU_FAULT;
 		if (gk20a_is_channel_marked_as_tsg(mmfault->refch)) {
 			id = mmfault->refch->tsgid;
-			if (id != FIFO_INVAL_TSG_ID)
+			if (id != FIFO_INVAL_TSG_ID) {
 				id_type = ID_TYPE_TSG;
+			}
 		} else {
 			nvgpu_err(g, "bare channels not supported");
 		}
@@ -898,19 +940,21 @@ static void gv11b_fb_handle_mmu_fault_common(struct gk20a *g,
 			mmfault->refch = NULL;
 		}
 
-		if (rc_type != RC_TYPE_NO_RC)
+		if (rc_type != RC_TYPE_NO_RC) {
 			g->ops.fifo.teardown_ch_tsg(g, act_eng_bitmask,
 				id, id_type, rc_type, mmfault);
+		}
 	} else {
 		if (mmfault->fault_type == gmmu_fault_type_pte_v()) {
 			nvgpu_log(g, gpu_dbg_intr, "invalid pte! try to fix");
 			err = gv11b_fb_fix_page_fault(g, mmfault);
-			if (err)
+			if (err) {
 				*invalidate_replay_val |=
 					fb_mmu_invalidate_replay_cancel_global_f();
-			else
+			} else {
 				*invalidate_replay_val |=
 					fb_mmu_invalidate_replay_start_ack_all_f();
+			}
 		} else {
 			/* cancel faults other than invalid pte */
 			*invalidate_replay_val |=
@@ -1026,8 +1070,9 @@ void gv11b_fb_handle_mmu_nonreplay_replay_fault(struct gk20a *g,
 
 	}
 	if (index == NVGPU_FB_MMU_FAULT_REPLAY_REG_INDEX &&
-			invalidate_replay_val != 0U)
+			invalidate_replay_val != 0U) {
 		gv11b_fb_replay_or_cancel_faults(g, invalidate_replay_val);
+	}
 }
 
 static void gv11b_mm_copy_from_fault_snap_reg(struct gk20a *g,
@@ -1057,8 +1102,9 @@ static void gv11b_mm_copy_from_fault_snap_reg(struct gk20a *g,
 
 	/* refch will be put back after fault is handled */
 	refch = gk20a_refch_from_inst_ptr(g, inst_ptr);
-	if (refch)
+	if (refch) {
 		chid = refch->chid;
+	}
 
 	/* It is still ok to continue if refch is NULL */
 	mmfault->refch = refch;
@@ -1180,15 +1226,17 @@ static void gv11b_fb_handle_bar2_fault(struct gk20a *g,
 {
 	if (fault_status & fb_mmu_fault_status_non_replayable_error_m()) {
 		if (gv11b_fb_is_fault_buf_enabled(g,
-				NVGPU_FB_MMU_FAULT_NONREPLAY_REG_INDEX))
+				NVGPU_FB_MMU_FAULT_NONREPLAY_REG_INDEX)) {
 			gv11b_fb_fault_buf_configure_hw(g, NVGPU_FB_MMU_FAULT_NONREPLAY_REG_INDEX);
+		}
 	}
 
 	if (fault_status & fb_mmu_fault_status_replayable_error_m()) {
 		if (gv11b_fb_is_fault_buf_enabled(g,
-				NVGPU_FB_MMU_FAULT_REPLAY_REG_INDEX))
+				NVGPU_FB_MMU_FAULT_REPLAY_REG_INDEX)) {
 			gv11b_fb_fault_buf_configure_hw(g,
 				NVGPU_FB_MMU_FAULT_REPLAY_REG_INDEX);
+		}
 	}
 	gv11b_ce_mthd_buffer_fault_in_bar2_fault(g);
 
@@ -1224,9 +1272,10 @@ void gv11b_fb_handle_other_fault_notify(struct gk20a *g,
 		gv11b_fb_handle_mmu_fault_common(g, mmfault,
 				&invalidate_replay_val);
 
-		if (invalidate_replay_val)
+		if (invalidate_replay_val) {
 			gv11b_fb_replay_or_cancel_faults(g,
 					invalidate_replay_val);
+		}
 	}
 }
 
@@ -1254,8 +1303,9 @@ void gv11b_fb_handle_replayable_mmu_fault(struct gk20a *g)
 {
 	u32 fault_status = gk20a_readl(g, fb_mmu_fault_status_r());
 
-	if (!(fault_status & fb_mmu_fault_status_replayable_m()))
+	if (!(fault_status & fb_mmu_fault_status_replayable_m())) {
 		return;
+	}
 
 	if (gv11b_fb_is_fault_buf_enabled(g,
 			NVGPU_FB_MMU_FAULT_NONREPLAY_REG_INDEX)) {
@@ -1349,16 +1399,19 @@ void gv11b_fb_hub_isr(struct gk20a *g)
 		nvgpu_info(g, "ecc uncorrected error notify");
 
 		status = gk20a_readl(g, fb_mmu_l2tlb_ecc_status_r());
-		if (status)
+		if (status) {
 			gv11b_handle_l2tlb_ecc_isr(g, status);
+		}
 
 		status = gk20a_readl(g, fb_mmu_hubtlb_ecc_status_r());
-		if (status)
+		if (status) {
 			gv11b_handle_hubtlb_ecc_isr(g, status);
+		}
 
 		status = gk20a_readl(g, fb_mmu_fillunit_ecc_status_r());
-		if (status)
+		if (status) {
 			gv11b_handle_fillunit_ecc_isr(g, status);
+		}
 	}
 	if (niso_intr &
 		(fb_niso_intr_mmu_other_fault_notify_m() |
@@ -1382,8 +1435,9 @@ bool gv11b_fb_mmu_fault_pending(struct gk20a *g)
 			fb_niso_intr_mmu_replayable_fault_notify_m() |
 			fb_niso_intr_mmu_replayable_fault_overflow_m() |
 			fb_niso_intr_mmu_nonreplayable_fault_notify_m() |
-			fb_niso_intr_mmu_nonreplayable_fault_overflow_m()))
+			fb_niso_intr_mmu_nonreplayable_fault_overflow_m())) {
 		return true;
+	}
 
 	return false;
 }
@@ -1420,8 +1474,9 @@ int gv11b_fb_mmu_invalidate_replay(struct gk20a *g,
 		nvgpu_udelay(5);
 	} while (!nvgpu_timeout_expired_msg(&timeout,
 			"invalidate replay failed on 0x%llx"));
-	if (err)
+	if (err) {
 		nvgpu_err(g, "invalidate replay timedout");
+	}
 
 	nvgpu_mutex_release(&g->mm.tlb_lock);
 
@@ -1460,8 +1515,9 @@ static int gv11b_fb_fix_page_fault(struct gk20a *g,
 	}
 
 	pte[0] |= gmmu_new_pte_valid_true_f();
-	if (pte[0] & gmmu_new_pte_read_only_true_f())
+	if (pte[0] & gmmu_new_pte_read_only_true_f()) {
 		pte[0] &= ~(gmmu_new_pte_read_only_true_f());
+	}
 	nvgpu_log(g, gpu_dbg_intr | gpu_dbg_pte,
 		"new pte: %#08x %#08x", pte[1], pte[0]);
 