diff options
author | Terje Bergstrom <tbergstrom@nvidia.com> | 2018-04-18 22:39:46 -0400 |
---|---|---|
committer | mobile promotions <svcmobile_promotions@nvidia.com> | 2018-05-09 21:26:04 -0400 |
commit | dd739fcb039d51606e9a5454ec0aab17bcb01965 (patch) | |
tree | 806ba8575d146367ad1be00086ca0cdae35a6b28 /drivers/gpu/nvgpu/gp10b | |
parent | 7e66f2a63d4855e763fa768047dfc32f6f96b771 (diff) |
gpu: nvgpu: Remove gk20a_dbg* functions
Switch all logging to nvgpu_log*(). The gk20a_dbg* macros are
intentionally left in place because they are still used by other repositories.
Because the new functions do not work without a pointer to struct
gk20a, and piping it just for logging is excessive, some log messages
are deleted.
Change-Id: I00e22e75fe4596a330bb0282ab4774b3639ee31e
Signed-off-by: Terje Bergstrom <tbergstrom@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1704148
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Diffstat (limited to 'drivers/gpu/nvgpu/gp10b')
-rw-r--r-- | drivers/gpu/nvgpu/gp10b/ce_gp10b.c | 10 | ||||
-rw-r--r-- | drivers/gpu/nvgpu/gp10b/fecs_trace_gp10b.c | 4 | ||||
-rw-r--r-- | drivers/gpu/nvgpu/gp10b/fifo_gp10b.c | 25 | ||||
-rw-r--r-- | drivers/gpu/nvgpu/gp10b/gr_gp10b.c | 123 | ||||
-rw-r--r-- | drivers/gpu/nvgpu/gp10b/ltc_gp10b.c | 14 | ||||
-rw-r--r-- | drivers/gpu/nvgpu/gp10b/mc_gp10b.c | 4 | ||||
-rw-r--r-- | drivers/gpu/nvgpu/gp10b/mm_gp10b.c | 16 | ||||
-rw-r--r-- | drivers/gpu/nvgpu/gp10b/pmu_gp10b.c | 26 | ||||
-rw-r--r-- | drivers/gpu/nvgpu/gp10b/rpfb_gp10b.c | 14 | ||||
-rw-r--r-- | drivers/gpu/nvgpu/gp10b/therm_gp10b.c | 8 |
10 files changed, 123 insertions, 121 deletions
diff --git a/drivers/gpu/nvgpu/gp10b/ce_gp10b.c b/drivers/gpu/nvgpu/gp10b/ce_gp10b.c index 86a2b751..e2ad1bd3 100644 --- a/drivers/gpu/nvgpu/gp10b/ce_gp10b.c +++ b/drivers/gpu/nvgpu/gp10b/ce_gp10b.c | |||
@@ -1,7 +1,7 @@ | |||
1 | /* | 1 | /* |
2 | * Pascal GPU series Copy Engine. | 2 | * Pascal GPU series Copy Engine. |
3 | * | 3 | * |
4 | * Copyright (c) 2011-2017, NVIDIA CORPORATION. All rights reserved. | 4 | * Copyright (c) 2011-2018, NVIDIA CORPORATION. All rights reserved. |
5 | * | 5 | * |
6 | * Permission is hereby granted, free of charge, to any person obtaining a | 6 | * Permission is hereby granted, free of charge, to any person obtaining a |
7 | * copy of this software and associated documentation files (the "Software"), | 7 | * copy of this software and associated documentation files (the "Software"), |
@@ -30,14 +30,14 @@ | |||
30 | 30 | ||
31 | static u32 ce_blockpipe_isr(struct gk20a *g, u32 fifo_intr) | 31 | static u32 ce_blockpipe_isr(struct gk20a *g, u32 fifo_intr) |
32 | { | 32 | { |
33 | gk20a_dbg(gpu_dbg_intr, "ce blocking pipe interrupt\n"); | 33 | nvgpu_log(g, gpu_dbg_intr, "ce blocking pipe interrupt\n"); |
34 | 34 | ||
35 | return ce_intr_status_blockpipe_pending_f(); | 35 | return ce_intr_status_blockpipe_pending_f(); |
36 | } | 36 | } |
37 | 37 | ||
38 | static u32 ce_launcherr_isr(struct gk20a *g, u32 fifo_intr) | 38 | static u32 ce_launcherr_isr(struct gk20a *g, u32 fifo_intr) |
39 | { | 39 | { |
40 | gk20a_dbg(gpu_dbg_intr, "ce launch error interrupt\n"); | 40 | nvgpu_log(g, gpu_dbg_intr, "ce launch error interrupt\n"); |
41 | 41 | ||
42 | return ce_intr_status_launcherr_pending_f(); | 42 | return ce_intr_status_launcherr_pending_f(); |
43 | } | 43 | } |
@@ -47,7 +47,7 @@ void gp10b_ce_isr(struct gk20a *g, u32 inst_id, u32 pri_base) | |||
47 | u32 ce_intr = gk20a_readl(g, ce_intr_status_r(inst_id)); | 47 | u32 ce_intr = gk20a_readl(g, ce_intr_status_r(inst_id)); |
48 | u32 clear_intr = 0; | 48 | u32 clear_intr = 0; |
49 | 49 | ||
50 | gk20a_dbg(gpu_dbg_intr, "ce isr %08x %08x\n", ce_intr, inst_id); | 50 | nvgpu_log(g, gpu_dbg_intr, "ce isr %08x %08x\n", ce_intr, inst_id); |
51 | 51 | ||
52 | /* clear blocking interrupts: they exibit broken behavior */ | 52 | /* clear blocking interrupts: they exibit broken behavior */ |
53 | if (ce_intr & ce_intr_status_blockpipe_pending_f()) | 53 | if (ce_intr & ce_intr_status_blockpipe_pending_f()) |
@@ -65,7 +65,7 @@ int gp10b_ce_nonstall_isr(struct gk20a *g, u32 inst_id, u32 pri_base) | |||
65 | int ops = 0; | 65 | int ops = 0; |
66 | u32 ce_intr = gk20a_readl(g, ce_intr_status_r(inst_id)); | 66 | u32 ce_intr = gk20a_readl(g, ce_intr_status_r(inst_id)); |
67 | 67 | ||
68 | gk20a_dbg(gpu_dbg_intr, "ce nonstall isr %08x %08x\n", ce_intr, inst_id); | 68 | nvgpu_log(g, gpu_dbg_intr, "ce nonstall isr %08x %08x\n", ce_intr, inst_id); |
69 | 69 | ||
70 | if (ce_intr & ce_intr_status_nonblockpipe_pending_f()) { | 70 | if (ce_intr & ce_intr_status_nonblockpipe_pending_f()) { |
71 | gk20a_writel(g, ce_intr_status_r(inst_id), | 71 | gk20a_writel(g, ce_intr_status_r(inst_id), |
diff --git a/drivers/gpu/nvgpu/gp10b/fecs_trace_gp10b.c b/drivers/gpu/nvgpu/gp10b/fecs_trace_gp10b.c index 511d565a..c477c77d 100644 --- a/drivers/gpu/nvgpu/gp10b/fecs_trace_gp10b.c +++ b/drivers/gpu/nvgpu/gp10b/fecs_trace_gp10b.c | |||
@@ -1,7 +1,7 @@ | |||
1 | /* | 1 | /* |
2 | * GP10B GPU FECS traces | 2 | * GP10B GPU FECS traces |
3 | * | 3 | * |
4 | * Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved. | 4 | * Copyright (c) 2016-2018, NVIDIA CORPORATION. All rights reserved. |
5 | * | 5 | * |
6 | * Permission is hereby granted, free of charge, to any person obtaining a | 6 | * Permission is hereby granted, free of charge, to any person obtaining a |
7 | * copy of this software and associated documentation files (the "Software"), | 7 | * copy of this software and associated documentation files (the "Software"), |
@@ -43,7 +43,7 @@ int gp10b_fecs_trace_flush(struct gk20a *g) | |||
43 | }; | 43 | }; |
44 | int err; | 44 | int err; |
45 | 45 | ||
46 | gk20a_dbg(gpu_dbg_fn|gpu_dbg_ctxsw, ""); | 46 | nvgpu_log(g, gpu_dbg_fn|gpu_dbg_ctxsw, " "); |
47 | 47 | ||
48 | err = gr_gk20a_elpg_protected_call(g, | 48 | err = gr_gk20a_elpg_protected_call(g, |
49 | gr_gk20a_submit_fecs_method_op(g, op, false)); | 49 | gr_gk20a_submit_fecs_method_op(g, op, false)); |
diff --git a/drivers/gpu/nvgpu/gp10b/fifo_gp10b.c b/drivers/gpu/nvgpu/gp10b/fifo_gp10b.c index 66f3012f..fd4ec34e 100644 --- a/drivers/gpu/nvgpu/gp10b/fifo_gp10b.c +++ b/drivers/gpu/nvgpu/gp10b/fifo_gp10b.c | |||
@@ -43,7 +43,7 @@ static void gp10b_set_pdb_fault_replay_flags(struct gk20a *g, | |||
43 | { | 43 | { |
44 | u32 val; | 44 | u32 val; |
45 | 45 | ||
46 | gk20a_dbg_fn(""); | 46 | nvgpu_log_fn(g, " "); |
47 | 47 | ||
48 | val = nvgpu_mem_rd32(g, mem, | 48 | val = nvgpu_mem_rd32(g, mem, |
49 | ram_in_page_dir_base_fault_replay_tex_w()); | 49 | ram_in_page_dir_base_fault_replay_tex_w()); |
@@ -59,7 +59,7 @@ static void gp10b_set_pdb_fault_replay_flags(struct gk20a *g, | |||
59 | nvgpu_mem_wr32(g, mem, | 59 | nvgpu_mem_wr32(g, mem, |
60 | ram_in_page_dir_base_fault_replay_gcc_w(), val); | 60 | ram_in_page_dir_base_fault_replay_gcc_w(), val); |
61 | 61 | ||
62 | gk20a_dbg_fn("done"); | 62 | nvgpu_log_fn(g, "done"); |
63 | } | 63 | } |
64 | 64 | ||
65 | int channel_gp10b_commit_userd(struct channel_gk20a *c) | 65 | int channel_gp10b_commit_userd(struct channel_gk20a *c) |
@@ -68,12 +68,12 @@ int channel_gp10b_commit_userd(struct channel_gk20a *c) | |||
68 | u32 addr_hi; | 68 | u32 addr_hi; |
69 | struct gk20a *g = c->g; | 69 | struct gk20a *g = c->g; |
70 | 70 | ||
71 | gk20a_dbg_fn(""); | 71 | nvgpu_log_fn(g, " "); |
72 | 72 | ||
73 | addr_lo = u64_lo32(c->userd_iova >> ram_userd_base_shift_v()); | 73 | addr_lo = u64_lo32(c->userd_iova >> ram_userd_base_shift_v()); |
74 | addr_hi = u64_hi32(c->userd_iova); | 74 | addr_hi = u64_hi32(c->userd_iova); |
75 | 75 | ||
76 | gk20a_dbg_info("channel %d : set ramfc userd 0x%16llx", | 76 | nvgpu_log_info(g, "channel %d : set ramfc userd 0x%16llx", |
77 | c->chid, (u64)c->userd_iova); | 77 | c->chid, (u64)c->userd_iova); |
78 | 78 | ||
79 | nvgpu_mem_wr32(g, &c->inst_block, | 79 | nvgpu_mem_wr32(g, &c->inst_block, |
@@ -98,7 +98,7 @@ int channel_gp10b_setup_ramfc(struct channel_gk20a *c, | |||
98 | struct gk20a *g = c->g; | 98 | struct gk20a *g = c->g; |
99 | struct nvgpu_mem *mem = &c->inst_block; | 99 | struct nvgpu_mem *mem = &c->inst_block; |
100 | 100 | ||
101 | gk20a_dbg_fn(""); | 101 | nvgpu_log_fn(g, " "); |
102 | 102 | ||
103 | nvgpu_memset(g, mem, 0, 0, ram_fc_size_val_v()); | 103 | nvgpu_memset(g, mem, 0, 0, ram_fc_size_val_v()); |
104 | 104 | ||
@@ -167,8 +167,9 @@ int gp10b_fifo_resetup_ramfc(struct channel_gk20a *c) | |||
167 | { | 167 | { |
168 | u32 new_syncpt = 0, old_syncpt; | 168 | u32 new_syncpt = 0, old_syncpt; |
169 | u32 v; | 169 | u32 v; |
170 | struct gk20a *g = c->g; | ||
170 | 171 | ||
171 | gk20a_dbg_fn(""); | 172 | nvgpu_log_fn(g, " "); |
172 | 173 | ||
173 | v = nvgpu_mem_rd32(c->g, &c->inst_block, | 174 | v = nvgpu_mem_rd32(c->g, &c->inst_block, |
174 | ram_fc_allowed_syncpoints_w()); | 175 | ram_fc_allowed_syncpoints_w()); |
@@ -185,7 +186,7 @@ int gp10b_fifo_resetup_ramfc(struct channel_gk20a *c) | |||
185 | 186 | ||
186 | v = pbdma_allowed_syncpoints_0_valid_f(1); | 187 | v = pbdma_allowed_syncpoints_0_valid_f(1); |
187 | 188 | ||
188 | gk20a_dbg_info("Channel %d, syncpt id %d\n", | 189 | nvgpu_log_info(g, "Channel %d, syncpt id %d\n", |
189 | c->chid, new_syncpt); | 190 | c->chid, new_syncpt); |
190 | 191 | ||
191 | v |= pbdma_allowed_syncpoints_0_index_f(new_syncpt); | 192 | v |= pbdma_allowed_syncpoints_0_index_f(new_syncpt); |
@@ -197,7 +198,7 @@ int gp10b_fifo_resetup_ramfc(struct channel_gk20a *c) | |||
197 | /* enable channel */ | 198 | /* enable channel */ |
198 | gk20a_enable_channel_tsg(c->g, c); | 199 | gk20a_enable_channel_tsg(c->g, c); |
199 | 200 | ||
200 | gk20a_dbg_fn("done"); | 201 | nvgpu_log_fn(g, "done"); |
201 | 202 | ||
202 | return 0; | 203 | return 0; |
203 | } | 204 | } |
@@ -207,7 +208,7 @@ int gp10b_fifo_engine_enum_from_type(struct gk20a *g, u32 engine_type, | |||
207 | { | 208 | { |
208 | int ret = ENGINE_INVAL_GK20A; | 209 | int ret = ENGINE_INVAL_GK20A; |
209 | 210 | ||
210 | gk20a_dbg_info("engine type %d", engine_type); | 211 | nvgpu_log_info(g, "engine type %d", engine_type); |
211 | if (engine_type == top_device_info_type_enum_graphics_v()) | 212 | if (engine_type == top_device_info_type_enum_graphics_v()) |
212 | ret = ENGINE_GR_GK20A; | 213 | ret = ENGINE_GR_GK20A; |
213 | else if (engine_type == top_device_info_type_enum_lce_v()) { | 214 | else if (engine_type == top_device_info_type_enum_lce_v()) { |
@@ -229,13 +230,13 @@ void gp10b_device_info_data_parse(struct gk20a *g, u32 table_entry, | |||
229 | *pri_base = | 230 | *pri_base = |
230 | (top_device_info_data_pri_base_v(table_entry) | 231 | (top_device_info_data_pri_base_v(table_entry) |
231 | << top_device_info_data_pri_base_align_v()); | 232 | << top_device_info_data_pri_base_align_v()); |
232 | gk20a_dbg_info("device info: pri_base: %d", *pri_base); | 233 | nvgpu_log_info(g, "device info: pri_base: %d", *pri_base); |
233 | } | 234 | } |
234 | if (fault_id && (top_device_info_data_fault_id_v(table_entry) == | 235 | if (fault_id && (top_device_info_data_fault_id_v(table_entry) == |
235 | top_device_info_data_fault_id_valid_v())) { | 236 | top_device_info_data_fault_id_valid_v())) { |
236 | *fault_id = | 237 | *fault_id = |
237 | g->ops.fifo.device_info_fault_id(table_entry); | 238 | g->ops.fifo.device_info_fault_id(table_entry); |
238 | gk20a_dbg_info("device info: fault_id: %d", *fault_id); | 239 | nvgpu_log_info(g, "device info: fault_id: %d", *fault_id); |
239 | } | 240 | } |
240 | } else | 241 | } else |
241 | nvgpu_err(g, "unknown device_info_data %d", | 242 | nvgpu_err(g, "unknown device_info_data %d", |
@@ -293,7 +294,7 @@ void gp10b_fifo_get_mmu_fault_info(struct gk20a *g, u32 mmu_fault_id, | |||
293 | u32 fault_info; | 294 | u32 fault_info; |
294 | u32 addr_lo, addr_hi; | 295 | u32 addr_lo, addr_hi; |
295 | 296 | ||
296 | gk20a_dbg_fn("mmu_fault_id %d", mmu_fault_id); | 297 | nvgpu_log_fn(g, "mmu_fault_id %d", mmu_fault_id); |
297 | 298 | ||
298 | memset(mmfault, 0, sizeof(*mmfault)); | 299 | memset(mmfault, 0, sizeof(*mmfault)); |
299 | 300 | ||
diff --git a/drivers/gpu/nvgpu/gp10b/gr_gp10b.c b/drivers/gpu/nvgpu/gp10b/gr_gp10b.c index 0178abbf..bc982d30 100644 --- a/drivers/gpu/nvgpu/gp10b/gr_gp10b.c +++ b/drivers/gpu/nvgpu/gp10b/gr_gp10b.c | |||
@@ -69,7 +69,7 @@ bool gr_gp10b_is_valid_class(struct gk20a *g, u32 class_num) | |||
69 | default: | 69 | default: |
70 | break; | 70 | break; |
71 | } | 71 | } |
72 | gk20a_dbg_info("class=0x%x valid=%d", class_num, valid); | 72 | nvgpu_log_info(g, "class=0x%x valid=%d", class_num, valid); |
73 | return valid; | 73 | return valid; |
74 | } | 74 | } |
75 | 75 | ||
@@ -169,7 +169,7 @@ int gr_gp10b_handle_sm_exception(struct gk20a *g, | |||
169 | gr_pri_gpc0_tpc0_sm_lrf_ecc_double_err_count_r() + offset, | 169 | gr_pri_gpc0_tpc0_sm_lrf_ecc_double_err_count_r() + offset, |
170 | 0); | 170 | 0); |
171 | if (lrf_ecc_sed_status) { | 171 | if (lrf_ecc_sed_status) { |
172 | gk20a_dbg(gpu_dbg_fn | gpu_dbg_intr, | 172 | nvgpu_log(g, gpu_dbg_fn | gpu_dbg_intr, |
173 | "Single bit error detected in SM LRF!"); | 173 | "Single bit error detected in SM LRF!"); |
174 | 174 | ||
175 | gr_gp10b_sm_lrf_ecc_overcount_war(1, | 175 | gr_gp10b_sm_lrf_ecc_overcount_war(1, |
@@ -181,7 +181,7 @@ int gr_gp10b_handle_sm_exception(struct gk20a *g, | |||
181 | lrf_single_count_delta; | 181 | lrf_single_count_delta; |
182 | } | 182 | } |
183 | if (lrf_ecc_ded_status) { | 183 | if (lrf_ecc_ded_status) { |
184 | gk20a_dbg(gpu_dbg_fn | gpu_dbg_intr, | 184 | nvgpu_log(g, gpu_dbg_fn | gpu_dbg_intr, |
185 | "Double bit error detected in SM LRF!"); | 185 | "Double bit error detected in SM LRF!"); |
186 | 186 | ||
187 | gr_gp10b_sm_lrf_ecc_overcount_war(0, | 187 | gr_gp10b_sm_lrf_ecc_overcount_war(0, |
@@ -208,7 +208,7 @@ int gr_gp10b_handle_sm_exception(struct gk20a *g, | |||
208 | gr_pri_gpc0_tpc0_sm_shm_ecc_status_single_err_detected_shm1_pending_f()) ) { | 208 | gr_pri_gpc0_tpc0_sm_shm_ecc_status_single_err_detected_shm1_pending_f()) ) { |
209 | u32 ecc_stats_reg_val; | 209 | u32 ecc_stats_reg_val; |
210 | 210 | ||
211 | gk20a_dbg(gpu_dbg_fn | gpu_dbg_intr, | 211 | nvgpu_log(g, gpu_dbg_fn | gpu_dbg_intr, |
212 | "Single bit error detected in SM SHM!"); | 212 | "Single bit error detected in SM SHM!"); |
213 | 213 | ||
214 | ecc_stats_reg_val = | 214 | ecc_stats_reg_val = |
@@ -230,7 +230,7 @@ int gr_gp10b_handle_sm_exception(struct gk20a *g, | |||
230 | gr_pri_gpc0_tpc0_sm_shm_ecc_status_double_err_detected_shm1_pending_f()) ) { | 230 | gr_pri_gpc0_tpc0_sm_shm_ecc_status_double_err_detected_shm1_pending_f()) ) { |
231 | u32 ecc_stats_reg_val; | 231 | u32 ecc_stats_reg_val; |
232 | 232 | ||
233 | gk20a_dbg(gpu_dbg_fn | gpu_dbg_intr, | 233 | nvgpu_log(g, gpu_dbg_fn | gpu_dbg_intr, |
234 | "Double bit error detected in SM SHM!"); | 234 | "Double bit error detected in SM SHM!"); |
235 | 235 | ||
236 | ecc_stats_reg_val = | 236 | ecc_stats_reg_val = |
@@ -260,14 +260,14 @@ int gr_gp10b_handle_tex_exception(struct gk20a *g, u32 gpc, u32 tpc, | |||
260 | u32 esr; | 260 | u32 esr; |
261 | u32 ecc_stats_reg_val; | 261 | u32 ecc_stats_reg_val; |
262 | 262 | ||
263 | gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, ""); | 263 | nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg, " "); |
264 | 264 | ||
265 | esr = gk20a_readl(g, | 265 | esr = gk20a_readl(g, |
266 | gr_gpc0_tpc0_tex_m_hww_esr_r() + offset); | 266 | gr_gpc0_tpc0_tex_m_hww_esr_r() + offset); |
267 | gk20a_dbg(gpu_dbg_intr | gpu_dbg_gpu_dbg, "0x%08x", esr); | 267 | nvgpu_log(g, gpu_dbg_intr | gpu_dbg_gpu_dbg, "0x%08x", esr); |
268 | 268 | ||
269 | if (esr & gr_gpc0_tpc0_tex_m_hww_esr_ecc_sec_pending_f()) { | 269 | if (esr & gr_gpc0_tpc0_tex_m_hww_esr_ecc_sec_pending_f()) { |
270 | gk20a_dbg(gpu_dbg_fn | gpu_dbg_intr, | 270 | nvgpu_log(g, gpu_dbg_fn | gpu_dbg_intr, |
271 | "Single bit error detected in TEX!"); | 271 | "Single bit error detected in TEX!"); |
272 | 272 | ||
273 | /* Pipe 0 counters */ | 273 | /* Pipe 0 counters */ |
@@ -323,7 +323,7 @@ int gr_gp10b_handle_tex_exception(struct gk20a *g, u32 gpc, u32 tpc, | |||
323 | gr_pri_gpc0_tpc0_tex_m_routing_sel_default_f()); | 323 | gr_pri_gpc0_tpc0_tex_m_routing_sel_default_f()); |
324 | } | 324 | } |
325 | if (esr & gr_gpc0_tpc0_tex_m_hww_esr_ecc_ded_pending_f()) { | 325 | if (esr & gr_gpc0_tpc0_tex_m_hww_esr_ecc_ded_pending_f()) { |
326 | gk20a_dbg(gpu_dbg_fn | gpu_dbg_intr, | 326 | nvgpu_log(g, gpu_dbg_fn | gpu_dbg_intr, |
327 | "Double bit error detected in TEX!"); | 327 | "Double bit error detected in TEX!"); |
328 | 328 | ||
329 | /* Pipe 0 counters */ | 329 | /* Pipe 0 counters */ |
@@ -403,7 +403,7 @@ int gr_gp10b_commit_global_cb_manager(struct gk20a *g, | |||
403 | u32 ppc_in_gpc_stride = nvgpu_get_litter_value(g, GPU_LIT_PPC_IN_GPC_STRIDE); | 403 | u32 ppc_in_gpc_stride = nvgpu_get_litter_value(g, GPU_LIT_PPC_IN_GPC_STRIDE); |
404 | u32 num_pes_per_gpc = nvgpu_get_litter_value(g, GPU_LIT_NUM_PES_PER_GPC); | 404 | u32 num_pes_per_gpc = nvgpu_get_litter_value(g, GPU_LIT_NUM_PES_PER_GPC); |
405 | 405 | ||
406 | gk20a_dbg_fn(""); | 406 | nvgpu_log_fn(g, " "); |
407 | 407 | ||
408 | tsg = tsg_gk20a_from_ch(c); | 408 | tsg = tsg_gk20a_from_ch(c); |
409 | if (!tsg) | 409 | if (!tsg) |
@@ -660,21 +660,21 @@ static void gr_gp10b_set_coalesce_buffer_size(struct gk20a *g, u32 data) | |||
660 | { | 660 | { |
661 | u32 val; | 661 | u32 val; |
662 | 662 | ||
663 | gk20a_dbg_fn(""); | 663 | nvgpu_log_fn(g, " "); |
664 | 664 | ||
665 | val = gk20a_readl(g, gr_gpcs_tc_debug0_r()); | 665 | val = gk20a_readl(g, gr_gpcs_tc_debug0_r()); |
666 | val = set_field(val, gr_gpcs_tc_debug0_limit_coalesce_buffer_size_m(), | 666 | val = set_field(val, gr_gpcs_tc_debug0_limit_coalesce_buffer_size_m(), |
667 | gr_gpcs_tc_debug0_limit_coalesce_buffer_size_f(data)); | 667 | gr_gpcs_tc_debug0_limit_coalesce_buffer_size_f(data)); |
668 | gk20a_writel(g, gr_gpcs_tc_debug0_r(), val); | 668 | gk20a_writel(g, gr_gpcs_tc_debug0_r(), val); |
669 | 669 | ||
670 | gk20a_dbg_fn("done"); | 670 | nvgpu_log_fn(g, "done"); |
671 | } | 671 | } |
672 | 672 | ||
673 | void gr_gp10b_set_bes_crop_debug3(struct gk20a *g, u32 data) | 673 | void gr_gp10b_set_bes_crop_debug3(struct gk20a *g, u32 data) |
674 | { | 674 | { |
675 | u32 val; | 675 | u32 val; |
676 | 676 | ||
677 | gk20a_dbg_fn(""); | 677 | nvgpu_log_fn(g, " "); |
678 | 678 | ||
679 | val = gk20a_readl(g, gr_bes_crop_debug3_r()); | 679 | val = gk20a_readl(g, gr_bes_crop_debug3_r()); |
680 | if ((data & 1)) { | 680 | if ((data & 1)) { |
@@ -722,7 +722,7 @@ void gr_gp10b_set_bes_crop_debug4(struct gk20a *g, u32 data) | |||
722 | int gr_gp10b_handle_sw_method(struct gk20a *g, u32 addr, | 722 | int gr_gp10b_handle_sw_method(struct gk20a *g, u32 addr, |
723 | u32 class_num, u32 offset, u32 data) | 723 | u32 class_num, u32 offset, u32 data) |
724 | { | 724 | { |
725 | gk20a_dbg_fn(""); | 725 | nvgpu_log_fn(g, " "); |
726 | 726 | ||
727 | if (class_num == PASCAL_COMPUTE_A) { | 727 | if (class_num == PASCAL_COMPUTE_A) { |
728 | switch (offset << 2) { | 728 | switch (offset << 2) { |
@@ -800,7 +800,7 @@ void gr_gp10b_set_alpha_circular_buffer_size(struct gk20a *g, u32 data) | |||
800 | u32 gpc_stride = nvgpu_get_litter_value(g, GPU_LIT_GPC_STRIDE); | 800 | u32 gpc_stride = nvgpu_get_litter_value(g, GPU_LIT_GPC_STRIDE); |
801 | u32 ppc_in_gpc_stride = nvgpu_get_litter_value(g, GPU_LIT_PPC_IN_GPC_STRIDE); | 801 | u32 ppc_in_gpc_stride = nvgpu_get_litter_value(g, GPU_LIT_PPC_IN_GPC_STRIDE); |
802 | 802 | ||
803 | gk20a_dbg_fn(""); | 803 | nvgpu_log_fn(g, " "); |
804 | 804 | ||
805 | if (alpha_cb_size > gr->alpha_cb_size) | 805 | if (alpha_cb_size > gr->alpha_cb_size) |
806 | alpha_cb_size = gr->alpha_cb_size; | 806 | alpha_cb_size = gr->alpha_cb_size; |
@@ -853,7 +853,7 @@ void gr_gp10b_set_circular_buffer_size(struct gk20a *g, u32 data) | |||
853 | u32 gpc_stride = nvgpu_get_litter_value(g, GPU_LIT_GPC_STRIDE); | 853 | u32 gpc_stride = nvgpu_get_litter_value(g, GPU_LIT_GPC_STRIDE); |
854 | u32 ppc_in_gpc_stride = nvgpu_get_litter_value(g, GPU_LIT_PPC_IN_GPC_STRIDE); | 854 | u32 ppc_in_gpc_stride = nvgpu_get_litter_value(g, GPU_LIT_PPC_IN_GPC_STRIDE); |
855 | 855 | ||
856 | gk20a_dbg_fn(""); | 856 | nvgpu_log_fn(g, " "); |
857 | 857 | ||
858 | if (cb_size_steady > gr->attrib_cb_size) | 858 | if (cb_size_steady > gr->attrib_cb_size) |
859 | cb_size_steady = gr->attrib_cb_size; | 859 | cb_size_steady = gr->attrib_cb_size; |
@@ -923,7 +923,7 @@ int gr_gp10b_init_ctx_state(struct gk20a *g) | |||
923 | }; | 923 | }; |
924 | int err; | 924 | int err; |
925 | 925 | ||
926 | gk20a_dbg_fn(""); | 926 | nvgpu_log_fn(g, " "); |
927 | 927 | ||
928 | err = gr_gk20a_init_ctx_state(g); | 928 | err = gr_gk20a_init_ctx_state(g); |
929 | if (err) | 929 | if (err) |
@@ -940,10 +940,10 @@ int gr_gp10b_init_ctx_state(struct gk20a *g) | |||
940 | } | 940 | } |
941 | } | 941 | } |
942 | 942 | ||
943 | gk20a_dbg_info("preempt image size: %u", | 943 | nvgpu_log_info(g, "preempt image size: %u", |
944 | g->gr.ctx_vars.preempt_image_size); | 944 | g->gr.ctx_vars.preempt_image_size); |
945 | 945 | ||
946 | gk20a_dbg_fn("done"); | 946 | nvgpu_log_fn(g, "done"); |
947 | 947 | ||
948 | return 0; | 948 | return 0; |
949 | } | 949 | } |
@@ -952,8 +952,9 @@ int gr_gp10b_alloc_buffer(struct vm_gk20a *vm, size_t size, | |||
952 | struct nvgpu_mem *mem) | 952 | struct nvgpu_mem *mem) |
953 | { | 953 | { |
954 | int err; | 954 | int err; |
955 | struct gk20a *g = gk20a_from_vm(vm); | ||
955 | 956 | ||
956 | gk20a_dbg_fn(""); | 957 | nvgpu_log_fn(g, " "); |
957 | 958 | ||
958 | err = nvgpu_dma_alloc_sys(vm->mm->g, size, mem); | 959 | err = nvgpu_dma_alloc_sys(vm->mm->g, size, mem); |
959 | if (err) | 960 | if (err) |
@@ -1029,9 +1030,9 @@ int gr_gp10b_set_ctxsw_preemption_mode(struct gk20a *g, | |||
1029 | g->gr.max_tpc_count; | 1030 | g->gr.max_tpc_count; |
1030 | attrib_cb_size = ALIGN(attrib_cb_size, 128); | 1031 | attrib_cb_size = ALIGN(attrib_cb_size, 128); |
1031 | 1032 | ||
1032 | gk20a_dbg_info("gfxp context spill_size=%d", spill_size); | 1033 | nvgpu_log_info(g, "gfxp context spill_size=%d", spill_size); |
1033 | gk20a_dbg_info("gfxp context pagepool_size=%d", pagepool_size); | 1034 | nvgpu_log_info(g, "gfxp context pagepool_size=%d", pagepool_size); |
1034 | gk20a_dbg_info("gfxp context attrib_cb_size=%d", | 1035 | nvgpu_log_info(g, "gfxp context attrib_cb_size=%d", |
1035 | attrib_cb_size); | 1036 | attrib_cb_size); |
1036 | 1037 | ||
1037 | err = gr_gp10b_alloc_buffer(vm, | 1038 | err = gr_gp10b_alloc_buffer(vm, |
@@ -1112,7 +1113,7 @@ int gr_gp10b_alloc_gr_ctx(struct gk20a *g, | |||
1112 | u32 graphics_preempt_mode = 0; | 1113 | u32 graphics_preempt_mode = 0; |
1113 | u32 compute_preempt_mode = 0; | 1114 | u32 compute_preempt_mode = 0; |
1114 | 1115 | ||
1115 | gk20a_dbg_fn(""); | 1116 | nvgpu_log_fn(g, " "); |
1116 | 1117 | ||
1117 | err = gr_gk20a_alloc_gr_ctx(g, gr_ctx, vm, class, flags); | 1118 | err = gr_gk20a_alloc_gr_ctx(g, gr_ctx, vm, class, flags); |
1118 | if (err) | 1119 | if (err) |
@@ -1137,7 +1138,7 @@ int gr_gp10b_alloc_gr_ctx(struct gk20a *g, | |||
1137 | goto fail_free_gk20a_ctx; | 1138 | goto fail_free_gk20a_ctx; |
1138 | } | 1139 | } |
1139 | 1140 | ||
1140 | gk20a_dbg_fn("done"); | 1141 | nvgpu_log_fn(g, "done"); |
1141 | 1142 | ||
1142 | return 0; | 1143 | return 0; |
1143 | 1144 | ||
@@ -1215,7 +1216,7 @@ void gr_gp10b_update_ctxsw_preemption_mode(struct gk20a *g, | |||
1215 | ctxsw_prog_main_image_compute_preemption_options_control_cta_f(); | 1216 | ctxsw_prog_main_image_compute_preemption_options_control_cta_f(); |
1216 | int err; | 1217 | int err; |
1217 | 1218 | ||
1218 | gk20a_dbg_fn(""); | 1219 | nvgpu_log_fn(g, " "); |
1219 | 1220 | ||
1220 | tsg = tsg_gk20a_from_ch(c); | 1221 | tsg = tsg_gk20a_from_ch(c); |
1221 | if (!tsg) | 1222 | if (!tsg) |
@@ -1224,21 +1225,21 @@ void gr_gp10b_update_ctxsw_preemption_mode(struct gk20a *g, | |||
1224 | gr_ctx = &tsg->gr_ctx; | 1225 | gr_ctx = &tsg->gr_ctx; |
1225 | 1226 | ||
1226 | if (gr_ctx->graphics_preempt_mode == NVGPU_PREEMPTION_MODE_GRAPHICS_GFXP) { | 1227 | if (gr_ctx->graphics_preempt_mode == NVGPU_PREEMPTION_MODE_GRAPHICS_GFXP) { |
1227 | gk20a_dbg_info("GfxP: %x", gfxp_preempt_option); | 1228 | nvgpu_log_info(g, "GfxP: %x", gfxp_preempt_option); |
1228 | nvgpu_mem_wr(g, mem, | 1229 | nvgpu_mem_wr(g, mem, |
1229 | ctxsw_prog_main_image_graphics_preemption_options_o(), | 1230 | ctxsw_prog_main_image_graphics_preemption_options_o(), |
1230 | gfxp_preempt_option); | 1231 | gfxp_preempt_option); |
1231 | } | 1232 | } |
1232 | 1233 | ||
1233 | if (gr_ctx->compute_preempt_mode == NVGPU_PREEMPTION_MODE_COMPUTE_CILP) { | 1234 | if (gr_ctx->compute_preempt_mode == NVGPU_PREEMPTION_MODE_COMPUTE_CILP) { |
1234 | gk20a_dbg_info("CILP: %x", cilp_preempt_option); | 1235 | nvgpu_log_info(g, "CILP: %x", cilp_preempt_option); |
1235 | nvgpu_mem_wr(g, mem, | 1236 | nvgpu_mem_wr(g, mem, |
1236 | ctxsw_prog_main_image_compute_preemption_options_o(), | 1237 | ctxsw_prog_main_image_compute_preemption_options_o(), |
1237 | cilp_preempt_option); | 1238 | cilp_preempt_option); |
1238 | } | 1239 | } |
1239 | 1240 | ||
1240 | if (gr_ctx->compute_preempt_mode == NVGPU_PREEMPTION_MODE_COMPUTE_CTA) { | 1241 | if (gr_ctx->compute_preempt_mode == NVGPU_PREEMPTION_MODE_COMPUTE_CTA) { |
1241 | gk20a_dbg_info("CTA: %x", cta_preempt_option); | 1242 | nvgpu_log_info(g, "CTA: %x", cta_preempt_option); |
1242 | nvgpu_mem_wr(g, mem, | 1243 | nvgpu_mem_wr(g, mem, |
1243 | ctxsw_prog_main_image_compute_preemption_options_o(), | 1244 | ctxsw_prog_main_image_compute_preemption_options_o(), |
1244 | cta_preempt_option); | 1245 | cta_preempt_option); |
@@ -1269,7 +1270,7 @@ void gr_gp10b_update_ctxsw_preemption_mode(struct gk20a *g, | |||
1269 | (u64_hi32(gr_ctx->betacb_ctxsw_buffer.gpu_va) << | 1270 | (u64_hi32(gr_ctx->betacb_ctxsw_buffer.gpu_va) << |
1270 | (32 - gr_gpcs_setup_attrib_cb_base_addr_39_12_align_bits_v())); | 1271 | (32 - gr_gpcs_setup_attrib_cb_base_addr_39_12_align_bits_v())); |
1271 | 1272 | ||
1272 | gk20a_dbg_info("attrib cb addr : 0x%016x", addr); | 1273 | nvgpu_log_info(g, "attrib cb addr : 0x%016x", addr); |
1273 | g->ops.gr.commit_global_attrib_cb(g, gr_ctx, addr, true); | 1274 | g->ops.gr.commit_global_attrib_cb(g, gr_ctx, addr, true); |
1274 | 1275 | ||
1275 | addr = (u64_lo32(gr_ctx->pagepool_ctxsw_buffer.gpu_va) >> | 1276 | addr = (u64_lo32(gr_ctx->pagepool_ctxsw_buffer.gpu_va) >> |
@@ -1315,7 +1316,7 @@ void gr_gp10b_update_ctxsw_preemption_mode(struct gk20a *g, | |||
1315 | } | 1316 | } |
1316 | 1317 | ||
1317 | out: | 1318 | out: |
1318 | gk20a_dbg_fn("done"); | 1319 | nvgpu_log_fn(g, "done"); |
1319 | } | 1320 | } |
1320 | 1321 | ||
1321 | int gr_gp10b_dump_gr_status_regs(struct gk20a *g, | 1322 | int gr_gp10b_dump_gr_status_regs(struct gk20a *g, |
@@ -1475,7 +1476,7 @@ int gr_gp10b_wait_empty(struct gk20a *g, unsigned long duration_ms, | |||
1475 | u32 activity0, activity1, activity2, activity4; | 1476 | u32 activity0, activity1, activity2, activity4; |
1476 | struct nvgpu_timeout timeout; | 1477 | struct nvgpu_timeout timeout; |
1477 | 1478 | ||
1478 | gk20a_dbg_fn(""); | 1479 | nvgpu_log_fn(g, " "); |
1479 | 1480 | ||
1480 | nvgpu_timeout_init(g, &timeout, duration_ms, NVGPU_TIMER_CPU_TIMER); | 1481 | nvgpu_timeout_init(g, &timeout, duration_ms, NVGPU_TIMER_CPU_TIMER); |
1481 | 1482 | ||
@@ -1500,7 +1501,7 @@ int gr_gp10b_wait_empty(struct gk20a *g, unsigned long duration_ms, | |||
1500 | gr_activity_empty_or_preempted(activity4)); | 1501 | gr_activity_empty_or_preempted(activity4)); |
1501 | 1502 | ||
1502 | if (!gr_enabled || (!gr_busy && !ctxsw_active)) { | 1503 | if (!gr_enabled || (!gr_busy && !ctxsw_active)) { |
1503 | gk20a_dbg_fn("done"); | 1504 | nvgpu_log_fn(g, "done"); |
1504 | return 0; | 1505 | return 0; |
1505 | } | 1506 | } |
1506 | 1507 | ||
@@ -1569,7 +1570,7 @@ void gr_gp10b_commit_global_bundle_cb(struct gk20a *g, | |||
1569 | 1570 | ||
1570 | data = min_t(u32, data, g->gr.min_gpm_fifo_depth); | 1571 | data = min_t(u32, data, g->gr.min_gpm_fifo_depth); |
1571 | 1572 | ||
1572 | gk20a_dbg_info("bundle cb token limit : %d, state limit : %d", | 1573 | nvgpu_log_info(g, "bundle cb token limit : %d, state limit : %d", |
1573 | g->gr.bundle_cb_token_limit, data); | 1574 | g->gr.bundle_cb_token_limit, data); |
1574 | 1575 | ||
1575 | gr_gk20a_ctx_patch_write(g, gr_ctx, gr_pd_ab_dist_cfg2_r(), | 1576 | gr_gk20a_ctx_patch_write(g, gr_ctx, gr_pd_ab_dist_cfg2_r(), |
@@ -1626,7 +1627,7 @@ int gr_gp10b_init_fs_state(struct gk20a *g) | |||
1626 | { | 1627 | { |
1627 | u32 data; | 1628 | u32 data; |
1628 | 1629 | ||
1629 | gk20a_dbg_fn(""); | 1630 | nvgpu_log_fn(g, " "); |
1630 | 1631 | ||
1631 | data = gk20a_readl(g, gr_gpcs_tpcs_sm_texio_control_r()); | 1632 | data = gk20a_readl(g, gr_gpcs_tpcs_sm_texio_control_r()); |
1632 | data = set_field(data, gr_gpcs_tpcs_sm_texio_control_oor_addr_check_mode_m(), | 1633 | data = set_field(data, gr_gpcs_tpcs_sm_texio_control_oor_addr_check_mode_m(), |
@@ -1705,7 +1706,7 @@ static int gr_gp10b_disable_channel_or_tsg(struct gk20a *g, struct channel_gk20a | |||
1705 | { | 1706 | { |
1706 | int ret = 0; | 1707 | int ret = 0; |
1707 | 1708 | ||
1708 | gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg | gpu_dbg_intr, ""); | 1709 | nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg | gpu_dbg_intr, " "); |
1709 | 1710 | ||
1710 | ret = gk20a_disable_channel_tsg(g, fault_ch); | 1711 | ret = gk20a_disable_channel_tsg(g, fault_ch); |
1711 | if (ret) { | 1712 | if (ret) { |
@@ -1721,18 +1722,18 @@ static int gr_gp10b_disable_channel_or_tsg(struct gk20a *g, struct channel_gk20a | |||
1721 | return ret; | 1722 | return ret; |
1722 | } | 1723 | } |
1723 | 1724 | ||
1724 | gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg | gpu_dbg_intr, "CILP: restarted runlist"); | 1725 | nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg | gpu_dbg_intr, "CILP: restarted runlist"); |
1725 | 1726 | ||
1726 | gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg | gpu_dbg_intr, | 1727 | nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg | gpu_dbg_intr, |
1727 | "CILP: tsgid: 0x%x", fault_ch->tsgid); | 1728 | "CILP: tsgid: 0x%x", fault_ch->tsgid); |
1728 | 1729 | ||
1729 | if (gk20a_is_channel_marked_as_tsg(fault_ch)) { | 1730 | if (gk20a_is_channel_marked_as_tsg(fault_ch)) { |
1730 | gk20a_fifo_issue_preempt(g, fault_ch->tsgid, true); | 1731 | gk20a_fifo_issue_preempt(g, fault_ch->tsgid, true); |
1731 | gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg | gpu_dbg_intr, | 1732 | nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg | gpu_dbg_intr, |
1732 | "CILP: preempted tsg"); | 1733 | "CILP: preempted tsg"); |
1733 | } else { | 1734 | } else { |
1734 | gk20a_fifo_issue_preempt(g, fault_ch->chid, false); | 1735 | gk20a_fifo_issue_preempt(g, fault_ch->chid, false); |
1735 | gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg | gpu_dbg_intr, | 1736 | nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg | gpu_dbg_intr, |
1736 | "CILP: preempted channel"); | 1737 | "CILP: preempted channel"); |
1737 | } | 1738 | } |
1738 | 1739 | ||
@@ -1746,7 +1747,7 @@ int gr_gp10b_set_cilp_preempt_pending(struct gk20a *g, | |||
1746 | struct tsg_gk20a *tsg; | 1747 | struct tsg_gk20a *tsg; |
1747 | struct nvgpu_gr_ctx *gr_ctx; | 1748 | struct nvgpu_gr_ctx *gr_ctx; |
1748 | 1749 | ||
1749 | gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg | gpu_dbg_intr, ""); | 1750 | nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg | gpu_dbg_intr, " "); |
1750 | 1751 | ||
1751 | tsg = tsg_gk20a_from_ch(fault_ch); | 1752 | tsg = tsg_gk20a_from_ch(fault_ch); |
1752 | if (!tsg) | 1753 | if (!tsg) |
@@ -1755,7 +1756,7 @@ int gr_gp10b_set_cilp_preempt_pending(struct gk20a *g, | |||
1755 | gr_ctx = &tsg->gr_ctx; | 1756 | gr_ctx = &tsg->gr_ctx; |
1756 | 1757 | ||
1757 | if (gr_ctx->cilp_preempt_pending) { | 1758 | if (gr_ctx->cilp_preempt_pending) { |
1758 | gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg | gpu_dbg_intr, | 1759 | nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg | gpu_dbg_intr, |
1759 | "CILP is already pending for chid %d", | 1760 | "CILP is already pending for chid %d", |
1760 | fault_ch->chid); | 1761 | fault_ch->chid); |
1761 | return 0; | 1762 | return 0; |
@@ -1763,7 +1764,7 @@ int gr_gp10b_set_cilp_preempt_pending(struct gk20a *g, | |||
1763 | 1764 | ||
1764 | /* get ctx_id from the ucode image */ | 1765 | /* get ctx_id from the ucode image */ |
1765 | if (!gr_ctx->ctx_id_valid) { | 1766 | if (!gr_ctx->ctx_id_valid) { |
1766 | gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg | gpu_dbg_intr, | 1767 | nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg | gpu_dbg_intr, |
1767 | "CILP: looking up ctx id"); | 1768 | "CILP: looking up ctx id"); |
1768 | ret = gr_gk20a_get_ctx_id(g, fault_ch, &gr_ctx->ctx_id); | 1769 | ret = gr_gk20a_get_ctx_id(g, fault_ch, &gr_ctx->ctx_id); |
1769 | if (ret) { | 1770 | if (ret) { |
@@ -1773,7 +1774,7 @@ int gr_gp10b_set_cilp_preempt_pending(struct gk20a *g, | |||
1773 | gr_ctx->ctx_id_valid = true; | 1774 | gr_ctx->ctx_id_valid = true; |
1774 | } | 1775 | } |
1775 | 1776 | ||
1776 | gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg | gpu_dbg_intr, | 1777 | nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg | gpu_dbg_intr, |
1777 | "CILP: ctx id is 0x%x", gr_ctx->ctx_id); | 1778 | "CILP: ctx id is 0x%x", gr_ctx->ctx_id); |
1778 | 1779 | ||
1779 | /* send ucode method to set ctxsw interrupt */ | 1780 | /* send ucode method to set ctxsw interrupt */ |
@@ -1795,10 +1796,10 @@ int gr_gp10b_set_cilp_preempt_pending(struct gk20a *g, | |||
1795 | return ret; | 1796 | return ret; |
1796 | } | 1797 | } |
1797 | 1798 | ||
1798 | gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg | gpu_dbg_intr, | 1799 | nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg | gpu_dbg_intr, |
1799 | "CILP: enabled ctxsw completion interrupt"); | 1800 | "CILP: enabled ctxsw completion interrupt"); |
1800 | 1801 | ||
1801 | gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg | gpu_dbg_intr, | 1802 | nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg | gpu_dbg_intr, |
1802 | "CILP: disabling channel %d", | 1803 | "CILP: disabling channel %d", |
1803 | fault_ch->chid); | 1804 | fault_ch->chid); |
1804 | 1805 | ||
@@ -1826,7 +1827,7 @@ static int gr_gp10b_clear_cilp_preempt_pending(struct gk20a *g, | |||
1826 | struct tsg_gk20a *tsg; | 1827 | struct tsg_gk20a *tsg; |
1827 | struct nvgpu_gr_ctx *gr_ctx; | 1828 | struct nvgpu_gr_ctx *gr_ctx; |
1828 | 1829 | ||
1829 | gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg | gpu_dbg_intr, ""); | 1830 | nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg | gpu_dbg_intr, " "); |
1830 | 1831 | ||
1831 | tsg = tsg_gk20a_from_ch(fault_ch); | 1832 | tsg = tsg_gk20a_from_ch(fault_ch); |
1832 | if (!tsg) | 1833 | if (!tsg) |
@@ -1837,7 +1838,7 @@ static int gr_gp10b_clear_cilp_preempt_pending(struct gk20a *g, | |||
1837 | /* The ucode is self-clearing, so all we need to do here is | 1838 | /* The ucode is self-clearing, so all we need to do here is |
1838 | to clear cilp_preempt_pending. */ | 1839 | to clear cilp_preempt_pending. */ |
1839 | if (!gr_ctx->cilp_preempt_pending) { | 1840 | if (!gr_ctx->cilp_preempt_pending) { |
1840 | gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg | gpu_dbg_intr, | 1841 | nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg | gpu_dbg_intr, |
1841 | "CILP is already cleared for chid %d\n", | 1842 | "CILP is already cleared for chid %d\n", |
1842 | fault_ch->chid); | 1843 | fault_ch->chid); |
1843 | return 0; | 1844 | return 0; |
@@ -1878,7 +1879,7 @@ int gr_gp10b_pre_process_sm_exception(struct gk20a *g, | |||
1878 | NVGPU_PREEMPTION_MODE_COMPUTE_CILP); | 1879 | NVGPU_PREEMPTION_MODE_COMPUTE_CILP); |
1879 | } | 1880 | } |
1880 | 1881 | ||
1881 | gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, "SM Exception received on gpc %d tpc %d = %u\n", | 1882 | nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg, "SM Exception received on gpc %d tpc %d = %u\n", |
1882 | gpc, tpc, global_esr); | 1883 | gpc, tpc, global_esr); |
1883 | 1884 | ||
1884 | if (cilp_enabled && sm_debugger_attached) { | 1885 | if (cilp_enabled && sm_debugger_attached) { |
@@ -1900,19 +1901,19 @@ int gr_gp10b_pre_process_sm_exception(struct gk20a *g, | |||
1900 | if (warp_esr != 0 || (global_esr & global_mask) != 0) { | 1901 | if (warp_esr != 0 || (global_esr & global_mask) != 0) { |
1901 | *ignore_debugger = true; | 1902 | *ignore_debugger = true; |
1902 | 1903 | ||
1903 | gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, | 1904 | nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg, |
1904 | "CILP: starting wait for LOCKED_DOWN on gpc %d tpc %d\n", | 1905 | "CILP: starting wait for LOCKED_DOWN on gpc %d tpc %d\n", |
1905 | gpc, tpc); | 1906 | gpc, tpc); |
1906 | 1907 | ||
1907 | if (gk20a_dbg_gpu_broadcast_stop_trigger(fault_ch)) { | 1908 | if (gk20a_dbg_gpu_broadcast_stop_trigger(fault_ch)) { |
1908 | gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, | 1909 | nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg, |
1909 | "CILP: Broadcasting STOP_TRIGGER from gpc %d tpc %d\n", | 1910 | "CILP: Broadcasting STOP_TRIGGER from gpc %d tpc %d\n", |
1910 | gpc, tpc); | 1911 | gpc, tpc); |
1911 | g->ops.gr.suspend_all_sms(g, global_mask, false); | 1912 | g->ops.gr.suspend_all_sms(g, global_mask, false); |
1912 | 1913 | ||
1913 | gk20a_dbg_gpu_clear_broadcast_stop_trigger(fault_ch); | 1914 | gk20a_dbg_gpu_clear_broadcast_stop_trigger(fault_ch); |
1914 | } else { | 1915 | } else { |
1915 | gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, | 1916 | nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg, |
1916 | "CILP: STOP_TRIGGER from gpc %d tpc %d\n", | 1917 | "CILP: STOP_TRIGGER from gpc %d tpc %d\n", |
1917 | gpc, tpc); | 1918 | gpc, tpc); |
1918 | g->ops.gr.suspend_single_sm(g, gpc, tpc, sm, global_mask, true); | 1919 | g->ops.gr.suspend_single_sm(g, gpc, tpc, sm, global_mask, true); |
@@ -1923,11 +1924,11 @@ int gr_gp10b_pre_process_sm_exception(struct gk20a *g, | |||
1923 | gpc, tpc, sm); | 1924 | gpc, tpc, sm); |
1924 | g->ops.gr.clear_sm_hww(g, | 1925 | g->ops.gr.clear_sm_hww(g, |
1925 | gpc, tpc, sm, global_esr_copy); | 1926 | gpc, tpc, sm, global_esr_copy); |
1926 | gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, | 1927 | nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg, |
1927 | "CILP: HWWs cleared for gpc %d tpc %d\n", | 1928 | "CILP: HWWs cleared for gpc %d tpc %d\n", |
1928 | gpc, tpc); | 1929 | gpc, tpc); |
1929 | 1930 | ||
1930 | gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, "CILP: Setting CILP preempt pending\n"); | 1931 | nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg, "CILP: Setting CILP preempt pending\n"); |
1931 | ret = gr_gp10b_set_cilp_preempt_pending(g, fault_ch); | 1932 | ret = gr_gp10b_set_cilp_preempt_pending(g, fault_ch); |
1932 | if (ret) { | 1933 | if (ret) { |
1933 | nvgpu_err(g, "CILP: error while setting CILP preempt pending!"); | 1934 | nvgpu_err(g, "CILP: error while setting CILP preempt pending!"); |
@@ -1936,7 +1937,7 @@ int gr_gp10b_pre_process_sm_exception(struct gk20a *g, | |||
1936 | 1937 | ||
1937 | dbgr_control0 = gk20a_readl(g, gr_gpc0_tpc0_sm_dbgr_control0_r() + offset); | 1938 | dbgr_control0 = gk20a_readl(g, gr_gpc0_tpc0_sm_dbgr_control0_r() + offset); |
1938 | if (dbgr_control0 & gr_gpcs_tpcs_sm_dbgr_control0_single_step_mode_enable_f()) { | 1939 | if (dbgr_control0 & gr_gpcs_tpcs_sm_dbgr_control0_single_step_mode_enable_f()) { |
1939 | gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, | 1940 | nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg, |
1940 | "CILP: clearing SINGLE_STEP_MODE before resume for gpc %d tpc %d\n", | 1941 | "CILP: clearing SINGLE_STEP_MODE before resume for gpc %d tpc %d\n", |
1941 | gpc, tpc); | 1942 | gpc, tpc); |
1942 | dbgr_control0 = set_field(dbgr_control0, | 1943 | dbgr_control0 = set_field(dbgr_control0, |
@@ -1945,13 +1946,13 @@ int gr_gp10b_pre_process_sm_exception(struct gk20a *g, | |||
1945 | gk20a_writel(g, gr_gpc0_tpc0_sm_dbgr_control0_r() + offset, dbgr_control0); | 1946 | gk20a_writel(g, gr_gpc0_tpc0_sm_dbgr_control0_r() + offset, dbgr_control0); |
1946 | } | 1947 | } |
1947 | 1948 | ||
1948 | gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, | 1949 | nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg, |
1949 | "CILP: resume for gpc %d tpc %d\n", | 1950 | "CILP: resume for gpc %d tpc %d\n", |
1950 | gpc, tpc); | 1951 | gpc, tpc); |
1951 | g->ops.gr.resume_single_sm(g, gpc, tpc, sm); | 1952 | g->ops.gr.resume_single_sm(g, gpc, tpc, sm); |
1952 | 1953 | ||
1953 | *ignore_debugger = true; | 1954 | *ignore_debugger = true; |
1954 | gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, "CILP: All done on gpc %d, tpc %d\n", gpc, tpc); | 1955 | nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg, "CILP: All done on gpc %d, tpc %d\n", gpc, tpc); |
1955 | } | 1956 | } |
1956 | 1957 | ||
1957 | *early_exit = true; | 1958 | *early_exit = true; |
@@ -1999,14 +2000,14 @@ int gr_gp10b_handle_fecs_error(struct gk20a *g, | |||
1999 | int ret = 0; | 2000 | int ret = 0; |
2000 | struct tsg_gk20a *tsg; | 2001 | struct tsg_gk20a *tsg; |
2001 | 2002 | ||
2002 | gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg | gpu_dbg_intr, ""); | 2003 | nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg | gpu_dbg_intr, " "); |
2003 | 2004 | ||
2004 | /* | 2005 | /* |
2005 | * INTR1 (bit 1 of the HOST_INT_STATUS_CTXSW_INTR) | 2006 | * INTR1 (bit 1 of the HOST_INT_STATUS_CTXSW_INTR) |
2006 | * indicates that a CILP ctxsw save has finished | 2007 | * indicates that a CILP ctxsw save has finished |
2007 | */ | 2008 | */ |
2008 | if (gr_fecs_intr & gr_fecs_host_int_status_ctxsw_intr_f(2)) { | 2009 | if (gr_fecs_intr & gr_fecs_host_int_status_ctxsw_intr_f(2)) { |
2009 | gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg | gpu_dbg_intr, | 2010 | nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg | gpu_dbg_intr, |
2010 | "CILP: ctxsw save completed!\n"); | 2011 | "CILP: ctxsw save completed!\n"); |
2011 | 2012 | ||
2012 | /* now clear the interrupt */ | 2013 | /* now clear the interrupt */ |
@@ -2162,7 +2163,7 @@ int gr_gp10b_suspend_contexts(struct gk20a *g, | |||
2162 | struct nvgpu_gr_ctx *gr_ctx; | 2163 | struct nvgpu_gr_ctx *gr_ctx; |
2163 | struct nvgpu_timeout timeout; | 2164 | struct nvgpu_timeout timeout; |
2164 | 2165 | ||
2165 | gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg | gpu_dbg_intr, | 2166 | nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg | gpu_dbg_intr, |
2166 | "CILP preempt pending, waiting %lu msecs for preemption", | 2167 | "CILP preempt pending, waiting %lu msecs for preemption", |
2167 | gk20a_get_gr_idle_timeout(g)); | 2168 | gk20a_get_gr_idle_timeout(g)); |
2168 | 2169 | ||
@@ -2285,7 +2286,7 @@ int gr_gp10b_set_preemption_mode(struct channel_gk20a *ch, | |||
2285 | 2286 | ||
2286 | if (g->ops.gr.set_ctxsw_preemption_mode) { | 2287 | if (g->ops.gr.set_ctxsw_preemption_mode) { |
2287 | 2288 | ||
2288 | gk20a_dbg(gpu_dbg_sched, "chid=%d tsgid=%d pid=%d " | 2289 | nvgpu_log(g, gpu_dbg_sched, "chid=%d tsgid=%d pid=%d " |
2289 | "graphics_preempt=%d compute_preempt=%d", | 2290 | "graphics_preempt=%d compute_preempt=%d", |
2290 | ch->chid, | 2291 | ch->chid, |
2291 | ch->tsgid, | 2292 | ch->tsgid, |
diff --git a/drivers/gpu/nvgpu/gp10b/ltc_gp10b.c b/drivers/gpu/nvgpu/gp10b/ltc_gp10b.c index 71764a7c..f74ca8f3 100644 --- a/drivers/gpu/nvgpu/gp10b/ltc_gp10b.c +++ b/drivers/gpu/nvgpu/gp10b/ltc_gp10b.c | |||
@@ -41,7 +41,7 @@ int gp10b_determine_L2_size_bytes(struct gk20a *g) | |||
41 | u32 tmp; | 41 | u32 tmp; |
42 | int ret; | 42 | int ret; |
43 | 43 | ||
44 | gk20a_dbg_fn(""); | 44 | nvgpu_log_fn(g, " "); |
45 | 45 | ||
46 | tmp = gk20a_readl(g, ltc_ltc0_lts0_tstg_info_1_r()); | 46 | tmp = gk20a_readl(g, ltc_ltc0_lts0_tstg_info_1_r()); |
47 | 47 | ||
@@ -49,9 +49,9 @@ int gp10b_determine_L2_size_bytes(struct gk20a *g) | |||
49 | ltc_ltc0_lts0_tstg_info_1_slice_size_in_kb_v(tmp)*1024 * | 49 | ltc_ltc0_lts0_tstg_info_1_slice_size_in_kb_v(tmp)*1024 * |
50 | ltc_ltc0_lts0_tstg_info_1_slices_per_l2_v(tmp); | 50 | ltc_ltc0_lts0_tstg_info_1_slices_per_l2_v(tmp); |
51 | 51 | ||
52 | gk20a_dbg(gpu_dbg_info, "L2 size: %d\n", ret); | 52 | nvgpu_log(g, gpu_dbg_info, "L2 size: %d\n", ret); |
53 | 53 | ||
54 | gk20a_dbg_fn("done"); | 54 | nvgpu_log_fn(g, "done"); |
55 | 55 | ||
56 | return ret; | 56 | return ret; |
57 | } | 57 | } |
@@ -83,7 +83,7 @@ int gp10b_ltc_init_comptags(struct gk20a *g, struct gr_gk20a *gr) | |||
83 | 83 | ||
84 | int err; | 84 | int err; |
85 | 85 | ||
86 | gk20a_dbg_fn(""); | 86 | nvgpu_log_fn(g, " "); |
87 | 87 | ||
88 | if (max_comptag_lines == 0U) | 88 | if (max_comptag_lines == 0U) |
89 | return 0; | 89 | return 0; |
@@ -109,11 +109,11 @@ int gp10b_ltc_init_comptags(struct gk20a *g, struct gr_gk20a *gr) | |||
109 | /* must be a multiple of 64KB */ | 109 | /* must be a multiple of 64KB */ |
110 | compbit_backing_size = roundup(compbit_backing_size, 64*1024); | 110 | compbit_backing_size = roundup(compbit_backing_size, 64*1024); |
111 | 111 | ||
112 | gk20a_dbg_info("compbit backing store size : %d", | 112 | nvgpu_log_info(g, "compbit backing store size : %d", |
113 | compbit_backing_size); | 113 | compbit_backing_size); |
114 | gk20a_dbg_info("max comptag lines : %d", | 114 | nvgpu_log_info(g, "max comptag lines : %d", |
115 | max_comptag_lines); | 115 | max_comptag_lines); |
116 | gk20a_dbg_info("gobs_per_comptagline_per_slice: %d", | 116 | nvgpu_log_info(g, "gobs_per_comptagline_per_slice: %d", |
117 | gobs_per_comptagline_per_slice); | 117 | gobs_per_comptagline_per_slice); |
118 | 118 | ||
119 | err = nvgpu_ltc_alloc_cbc(g, compbit_backing_size); | 119 | err = nvgpu_ltc_alloc_cbc(g, compbit_backing_size); |
diff --git a/drivers/gpu/nvgpu/gp10b/mc_gp10b.c b/drivers/gpu/nvgpu/gp10b/mc_gp10b.c index dde12854..5969e45d 100644 --- a/drivers/gpu/nvgpu/gp10b/mc_gp10b.c +++ b/drivers/gpu/nvgpu/gp10b/mc_gp10b.c | |||
@@ -87,7 +87,7 @@ void mc_gp10b_isr_stall(struct gk20a *g) | |||
87 | 87 | ||
88 | mc_intr_0 = gk20a_readl(g, mc_intr_r(0)); | 88 | mc_intr_0 = gk20a_readl(g, mc_intr_r(0)); |
89 | 89 | ||
90 | gk20a_dbg(gpu_dbg_intr, "stall intr 0x%08x\n", mc_intr_0); | 90 | nvgpu_log(g, gpu_dbg_intr, "stall intr 0x%08x\n", mc_intr_0); |
91 | 91 | ||
92 | for (engine_id_idx = 0; engine_id_idx < g->fifo.num_engines; engine_id_idx++) { | 92 | for (engine_id_idx = 0; engine_id_idx < g->fifo.num_engines; engine_id_idx++) { |
93 | active_engine_id = g->fifo.active_engines_list[engine_id_idx]; | 93 | active_engine_id = g->fifo.active_engines_list[engine_id_idx]; |
@@ -126,7 +126,7 @@ void mc_gp10b_isr_stall(struct gk20a *g) | |||
126 | g->ops.mc.is_intr_nvlink_pending(g, mc_intr_0)) | 126 | g->ops.mc.is_intr_nvlink_pending(g, mc_intr_0)) |
127 | g->ops.nvlink.isr(g); | 127 | g->ops.nvlink.isr(g); |
128 | 128 | ||
129 | gk20a_dbg(gpu_dbg_intr, "stall intr done 0x%08x\n", mc_intr_0); | 129 | nvgpu_log(g, gpu_dbg_intr, "stall intr done 0x%08x\n", mc_intr_0); |
130 | 130 | ||
131 | } | 131 | } |
132 | 132 | ||
diff --git a/drivers/gpu/nvgpu/gp10b/mm_gp10b.c b/drivers/gpu/nvgpu/gp10b/mm_gp10b.c index 978b6f50..811697c3 100644 --- a/drivers/gpu/nvgpu/gp10b/mm_gp10b.c +++ b/drivers/gpu/nvgpu/gp10b/mm_gp10b.c | |||
@@ -1,7 +1,7 @@ | |||
1 | /* | 1 | /* |
2 | * GP10B MMU | 2 | * GP10B MMU |
3 | * | 3 | * |
4 | * Copyright (c) 2014-2017, NVIDIA CORPORATION. All rights reserved. | 4 | * Copyright (c) 2014-2018, NVIDIA CORPORATION. All rights reserved. |
5 | * | 5 | * |
6 | * Permission is hereby granted, free of charge, to any person obtaining a | 6 | * Permission is hereby granted, free of charge, to any person obtaining a |
7 | * copy of this software and associated documentation files (the "Software"), | 7 | * copy of this software and associated documentation files (the "Software"), |
@@ -53,7 +53,7 @@ int gp10b_init_mm_setup_hw(struct gk20a *g) | |||
53 | struct nvgpu_mem *inst_block = &mm->bar1.inst_block; | 53 | struct nvgpu_mem *inst_block = &mm->bar1.inst_block; |
54 | int err = 0; | 54 | int err = 0; |
55 | 55 | ||
56 | gk20a_dbg_fn(""); | 56 | nvgpu_log_fn(g, " "); |
57 | 57 | ||
58 | g->ops.fb.set_mmu_page_size(g); | 58 | g->ops.fb.set_mmu_page_size(g); |
59 | 59 | ||
@@ -73,7 +73,7 @@ int gp10b_init_mm_setup_hw(struct gk20a *g) | |||
73 | 73 | ||
74 | err = gp10b_replayable_pagefault_buffer_init(g); | 74 | err = gp10b_replayable_pagefault_buffer_init(g); |
75 | 75 | ||
76 | gk20a_dbg_fn("done"); | 76 | nvgpu_log_fn(g, "done"); |
77 | return err; | 77 | return err; |
78 | 78 | ||
79 | } | 79 | } |
@@ -87,7 +87,7 @@ int gp10b_init_bar2_vm(struct gk20a *g) | |||
87 | 87 | ||
88 | /* BAR2 aperture size is 32MB */ | 88 | /* BAR2 aperture size is 32MB */ |
89 | mm->bar2.aperture_size = 32 << 20; | 89 | mm->bar2.aperture_size = 32 << 20; |
90 | gk20a_dbg_info("bar2 vm size = 0x%x", mm->bar2.aperture_size); | 90 | nvgpu_log_info(g, "bar2 vm size = 0x%x", mm->bar2.aperture_size); |
91 | 91 | ||
92 | mm->bar2.vm = nvgpu_vm_init(g, big_page_size, SZ_4K, | 92 | mm->bar2.vm = nvgpu_vm_init(g, big_page_size, SZ_4K, |
93 | mm->bar2.aperture_size - SZ_4K, | 93 | mm->bar2.aperture_size - SZ_4K, |
@@ -115,12 +115,12 @@ int gp10b_init_bar2_mm_hw_setup(struct gk20a *g) | |||
115 | struct nvgpu_mem *inst_block = &mm->bar2.inst_block; | 115 | struct nvgpu_mem *inst_block = &mm->bar2.inst_block; |
116 | u64 inst_pa = nvgpu_inst_block_addr(g, inst_block); | 116 | u64 inst_pa = nvgpu_inst_block_addr(g, inst_block); |
117 | 117 | ||
118 | gk20a_dbg_fn(""); | 118 | nvgpu_log_fn(g, " "); |
119 | 119 | ||
120 | g->ops.fb.set_mmu_page_size(g); | 120 | g->ops.fb.set_mmu_page_size(g); |
121 | 121 | ||
122 | inst_pa = (u32)(inst_pa >> bus_bar2_block_ptr_shift_v()); | 122 | inst_pa = (u32)(inst_pa >> bus_bar2_block_ptr_shift_v()); |
123 | gk20a_dbg_info("bar2 inst block ptr: 0x%08x", (u32)inst_pa); | 123 | nvgpu_log_info(g, "bar2 inst block ptr: 0x%08x", (u32)inst_pa); |
124 | 124 | ||
125 | gk20a_writel(g, bus_bar2_block_r(), | 125 | gk20a_writel(g, bus_bar2_block_r(), |
126 | nvgpu_aperture_mask(g, inst_block, | 126 | nvgpu_aperture_mask(g, inst_block, |
@@ -130,7 +130,7 @@ int gp10b_init_bar2_mm_hw_setup(struct gk20a *g) | |||
130 | bus_bar2_block_mode_virtual_f() | | 130 | bus_bar2_block_mode_virtual_f() | |
131 | bus_bar2_block_ptr_f(inst_pa)); | 131 | bus_bar2_block_ptr_f(inst_pa)); |
132 | 132 | ||
133 | gk20a_dbg_fn("done"); | 133 | nvgpu_log_fn(g, "done"); |
134 | return 0; | 134 | return 0; |
135 | } | 135 | } |
136 | 136 | ||
@@ -433,7 +433,7 @@ void gp10b_mm_init_pdb(struct gk20a *g, struct nvgpu_mem *inst_block, | |||
433 | u32 pdb_addr_lo = u64_lo32(pdb_addr >> ram_in_base_shift_v()); | 433 | u32 pdb_addr_lo = u64_lo32(pdb_addr >> ram_in_base_shift_v()); |
434 | u32 pdb_addr_hi = u64_hi32(pdb_addr); | 434 | u32 pdb_addr_hi = u64_hi32(pdb_addr); |
435 | 435 | ||
436 | gk20a_dbg_info("pde pa=0x%llx", pdb_addr); | 436 | nvgpu_log_info(g, "pde pa=0x%llx", pdb_addr); |
437 | 437 | ||
438 | nvgpu_mem_wr32(g, inst_block, ram_in_page_dir_base_lo_w(), | 438 | nvgpu_mem_wr32(g, inst_block, ram_in_page_dir_base_lo_w(), |
439 | nvgpu_aperture_mask(g, vm->pdb.mem, | 439 | nvgpu_aperture_mask(g, vm->pdb.mem, |
diff --git a/drivers/gpu/nvgpu/gp10b/pmu_gp10b.c b/drivers/gpu/nvgpu/gp10b/pmu_gp10b.c index c94d580a..ca111725 100644 --- a/drivers/gpu/nvgpu/gp10b/pmu_gp10b.c +++ b/drivers/gpu/nvgpu/gp10b/pmu_gp10b.c | |||
@@ -37,8 +37,8 @@ | |||
37 | #include <nvgpu/hw/gp10b/hw_pwr_gp10b.h> | 37 | #include <nvgpu/hw/gp10b/hw_pwr_gp10b.h> |
38 | #include <nvgpu/hw/gp10b/hw_fuse_gp10b.h> | 38 | #include <nvgpu/hw/gp10b/hw_fuse_gp10b.h> |
39 | 39 | ||
40 | #define gp10b_dbg_pmu(fmt, arg...) \ | 40 | #define gp10b_dbg_pmu(g, fmt, arg...) \ |
41 | gk20a_dbg(gpu_dbg_pmu, fmt, ##arg) | 41 | nvgpu_log(g, gpu_dbg_pmu, fmt, ##arg) |
42 | 42 | ||
43 | /* PROD settings for ELPG sequencing registers*/ | 43 | /* PROD settings for ELPG sequencing registers*/ |
44 | static struct pg_init_sequence_list _pginitseq_gp10b[] = { | 44 | static struct pg_init_sequence_list _pginitseq_gp10b[] = { |
@@ -147,9 +147,9 @@ static void gp10b_pmu_load_multiple_falcons(struct gk20a *g, u32 falconidmask, | |||
147 | struct pmu_cmd cmd; | 147 | struct pmu_cmd cmd; |
148 | u32 seq; | 148 | u32 seq; |
149 | 149 | ||
150 | gk20a_dbg_fn(""); | 150 | nvgpu_log_fn(g, " "); |
151 | 151 | ||
152 | gp10b_dbg_pmu("wprinit status = %x\n", g->pmu_lsf_pmu_wpr_init_done); | 152 | gp10b_dbg_pmu(g, "wprinit status = %x\n", g->pmu_lsf_pmu_wpr_init_done); |
153 | if (g->pmu_lsf_pmu_wpr_init_done) { | 153 | if (g->pmu_lsf_pmu_wpr_init_done) { |
154 | /* send message to load FECS falcon */ | 154 | /* send message to load FECS falcon */ |
155 | memset(&cmd, 0, sizeof(struct pmu_cmd)); | 155 | memset(&cmd, 0, sizeof(struct pmu_cmd)); |
@@ -164,13 +164,13 @@ static void gp10b_pmu_load_multiple_falcons(struct gk20a *g, u32 falconidmask, | |||
164 | cmd.cmd.acr.boot_falcons.usevamask = 0; | 164 | cmd.cmd.acr.boot_falcons.usevamask = 0; |
165 | cmd.cmd.acr.boot_falcons.wprvirtualbase.lo = 0x0; | 165 | cmd.cmd.acr.boot_falcons.wprvirtualbase.lo = 0x0; |
166 | cmd.cmd.acr.boot_falcons.wprvirtualbase.hi = 0x0; | 166 | cmd.cmd.acr.boot_falcons.wprvirtualbase.hi = 0x0; |
167 | gp10b_dbg_pmu("PMU_ACR_CMD_ID_BOOTSTRAP_MULTIPLE_FALCONS:%x\n", | 167 | gp10b_dbg_pmu(g, "PMU_ACR_CMD_ID_BOOTSTRAP_MULTIPLE_FALCONS:%x\n", |
168 | falconidmask); | 168 | falconidmask); |
169 | nvgpu_pmu_cmd_post(g, &cmd, NULL, NULL, PMU_COMMAND_QUEUE_HPQ, | 169 | nvgpu_pmu_cmd_post(g, &cmd, NULL, NULL, PMU_COMMAND_QUEUE_HPQ, |
170 | pmu_handle_fecs_boot_acr_msg, pmu, &seq, ~0); | 170 | pmu_handle_fecs_boot_acr_msg, pmu, &seq, ~0); |
171 | } | 171 | } |
172 | 172 | ||
173 | gk20a_dbg_fn("done"); | 173 | nvgpu_log_fn(g, "done"); |
174 | return; | 174 | return; |
175 | } | 175 | } |
176 | 176 | ||
@@ -209,7 +209,7 @@ int gp10b_load_falcon_ucode(struct gk20a *g, u32 falconidmask) | |||
209 | static void pmu_handle_gr_param_msg(struct gk20a *g, struct pmu_msg *msg, | 209 | static void pmu_handle_gr_param_msg(struct gk20a *g, struct pmu_msg *msg, |
210 | void *param, u32 handle, u32 status) | 210 | void *param, u32 handle, u32 status) |
211 | { | 211 | { |
212 | gk20a_dbg_fn(""); | 212 | nvgpu_log_fn(g, " "); |
213 | 213 | ||
214 | if (status != 0) { | 214 | if (status != 0) { |
215 | nvgpu_err(g, "GR PARAM cmd aborted"); | 215 | nvgpu_err(g, "GR PARAM cmd aborted"); |
@@ -217,7 +217,7 @@ static void pmu_handle_gr_param_msg(struct gk20a *g, struct pmu_msg *msg, | |||
217 | return; | 217 | return; |
218 | } | 218 | } |
219 | 219 | ||
220 | gp10b_dbg_pmu("GR PARAM is acknowledged from PMU %x \n", | 220 | gp10b_dbg_pmu(g, "GR PARAM is acknowledged from PMU %x \n", |
221 | msg->msg.pg.msg_type); | 221 | msg->msg.pg.msg_type); |
222 | 222 | ||
223 | return; | 223 | return; |
@@ -243,7 +243,7 @@ int gp10b_pg_gr_init(struct gk20a *g, u32 pg_engine_id) | |||
243 | cmd.cmd.pg.gr_init_param_v2.ldiv_slowdown_factor = | 243 | cmd.cmd.pg.gr_init_param_v2.ldiv_slowdown_factor = |
244 | g->ldiv_slowdown_factor; | 244 | g->ldiv_slowdown_factor; |
245 | 245 | ||
246 | gp10b_dbg_pmu("cmd post PMU_PG_CMD_ID_PG_PARAM "); | 246 | gp10b_dbg_pmu(g, "cmd post PMU_PG_CMD_ID_PG_PARAM "); |
247 | nvgpu_pmu_cmd_post(g, &cmd, NULL, NULL, PMU_COMMAND_QUEUE_HPQ, | 247 | nvgpu_pmu_cmd_post(g, &cmd, NULL, NULL, PMU_COMMAND_QUEUE_HPQ, |
248 | pmu_handle_gr_param_msg, pmu, &seq, ~0); | 248 | pmu_handle_gr_param_msg, pmu, &seq, ~0); |
249 | 249 | ||
@@ -276,7 +276,7 @@ int gp10b_pmu_setup_elpg(struct gk20a *g) | |||
276 | u32 reg_writes; | 276 | u32 reg_writes; |
277 | u32 index; | 277 | u32 index; |
278 | 278 | ||
279 | gk20a_dbg_fn(""); | 279 | nvgpu_log_fn(g, " "); |
280 | 280 | ||
281 | if (g->elpg_enabled) { | 281 | if (g->elpg_enabled) { |
282 | reg_writes = ((sizeof(_pginitseq_gp10b) / | 282 | reg_writes = ((sizeof(_pginitseq_gp10b) / |
@@ -288,7 +288,7 @@ int gp10b_pmu_setup_elpg(struct gk20a *g) | |||
288 | } | 288 | } |
289 | } | 289 | } |
290 | 290 | ||
291 | gk20a_dbg_fn("done"); | 291 | nvgpu_log_fn(g, "done"); |
292 | return ret; | 292 | return ret; |
293 | } | 293 | } |
294 | 294 | ||
@@ -305,7 +305,7 @@ int gp10b_init_pmu_setup_hw1(struct gk20a *g) | |||
305 | struct nvgpu_pmu *pmu = &g->pmu; | 305 | struct nvgpu_pmu *pmu = &g->pmu; |
306 | int err; | 306 | int err; |
307 | 307 | ||
308 | gk20a_dbg_fn(""); | 308 | nvgpu_log_fn(g, " "); |
309 | 309 | ||
310 | nvgpu_mutex_acquire(&pmu->isr_mutex); | 310 | nvgpu_mutex_acquire(&pmu->isr_mutex); |
311 | nvgpu_flcn_reset(pmu->flcn); | 311 | nvgpu_flcn_reset(pmu->flcn); |
@@ -333,7 +333,7 @@ int gp10b_init_pmu_setup_hw1(struct gk20a *g) | |||
333 | if (err) | 333 | if (err) |
334 | return err; | 334 | return err; |
335 | 335 | ||
336 | gk20a_dbg_fn("done"); | 336 | nvgpu_log_fn(g, "done"); |
337 | return 0; | 337 | return 0; |
338 | 338 | ||
339 | } | 339 | } |
diff --git a/drivers/gpu/nvgpu/gp10b/rpfb_gp10b.c b/drivers/gpu/nvgpu/gp10b/rpfb_gp10b.c index 385bebbd..3f089545 100644 --- a/drivers/gpu/nvgpu/gp10b/rpfb_gp10b.c +++ b/drivers/gpu/nvgpu/gp10b/rpfb_gp10b.c | |||
@@ -1,7 +1,7 @@ | |||
1 | /* | 1 | /* |
2 | * GP10B RPFB | 2 | * GP10B RPFB |
3 | * | 3 | * |
4 | * Copyright (c) 2015, NVIDIA CORPORATION. All rights reserved. | 4 | * Copyright (c) 2015-2018, NVIDIA CORPORATION. All rights reserved. |
5 | * | 5 | * |
6 | * Permission is hereby granted, free of charge, to any person obtaining a | 6 | * Permission is hereby granted, free of charge, to any person obtaining a |
7 | * copy of this software and associated documentation files (the "Software"), | 7 | * copy of this software and associated documentation files (the "Software"), |
@@ -42,7 +42,7 @@ int gp10b_replayable_pagefault_buffer_init(struct gk20a *g) | |||
42 | size_t rbfb_size = NV_UVM_FAULT_BUF_SIZE * | 42 | size_t rbfb_size = NV_UVM_FAULT_BUF_SIZE * |
43 | fifo_replay_fault_buffer_size_hw_entries_v(); | 43 | fifo_replay_fault_buffer_size_hw_entries_v(); |
44 | 44 | ||
45 | gk20a_dbg_fn(""); | 45 | nvgpu_log_fn(g, " "); |
46 | 46 | ||
47 | if (!g->mm.bar2_desc.gpu_va) { | 47 | if (!g->mm.bar2_desc.gpu_va) { |
48 | err = nvgpu_dma_alloc_map_sys(vm, rbfb_size, | 48 | err = nvgpu_dma_alloc_map_sys(vm, rbfb_size, |
@@ -60,7 +60,7 @@ int gp10b_replayable_pagefault_buffer_init(struct gk20a *g) | |||
60 | gk20a_writel(g, fifo_replay_fault_buffer_lo_r(), | 60 | gk20a_writel(g, fifo_replay_fault_buffer_lo_r(), |
61 | fifo_replay_fault_buffer_lo_base_f(addr_lo) | | 61 | fifo_replay_fault_buffer_lo_base_f(addr_lo) | |
62 | fifo_replay_fault_buffer_lo_enable_true_v()); | 62 | fifo_replay_fault_buffer_lo_enable_true_v()); |
63 | gk20a_dbg_fn("done"); | 63 | nvgpu_log_fn(g, "done"); |
64 | return 0; | 64 | return 0; |
65 | } | 65 | } |
66 | 66 | ||
@@ -75,14 +75,14 @@ u32 gp10b_replayable_pagefault_buffer_get_index(struct gk20a *g) | |||
75 | { | 75 | { |
76 | u32 get_idx = 0; | 76 | u32 get_idx = 0; |
77 | 77 | ||
78 | gk20a_dbg_fn(""); | 78 | nvgpu_log_fn(g, " "); |
79 | 79 | ||
80 | get_idx = gk20a_readl(g, fifo_replay_fault_buffer_get_r()); | 80 | get_idx = gk20a_readl(g, fifo_replay_fault_buffer_get_r()); |
81 | 81 | ||
82 | if (get_idx >= fifo_replay_fault_buffer_size_hw_entries_v()) | 82 | if (get_idx >= fifo_replay_fault_buffer_size_hw_entries_v()) |
83 | nvgpu_err(g, "Error in replayable fault buffer"); | 83 | nvgpu_err(g, "Error in replayable fault buffer"); |
84 | 84 | ||
85 | gk20a_dbg_fn("done"); | 85 | nvgpu_log_fn(g, "done"); |
86 | return get_idx; | 86 | return get_idx; |
87 | } | 87 | } |
88 | 88 | ||
@@ -90,13 +90,13 @@ u32 gp10b_replayable_pagefault_buffer_put_index(struct gk20a *g) | |||
90 | { | 90 | { |
91 | u32 put_idx = 0; | 91 | u32 put_idx = 0; |
92 | 92 | ||
93 | gk20a_dbg_fn(""); | 93 | nvgpu_log_fn(g, " "); |
94 | put_idx = gk20a_readl(g, fifo_replay_fault_buffer_put_r()); | 94 | put_idx = gk20a_readl(g, fifo_replay_fault_buffer_put_r()); |
95 | 95 | ||
96 | if (put_idx >= fifo_replay_fault_buffer_size_hw_entries_v()) | 96 | if (put_idx >= fifo_replay_fault_buffer_size_hw_entries_v()) |
97 | nvgpu_err(g, "Error in UVM"); | 97 | nvgpu_err(g, "Error in UVM"); |
98 | 98 | ||
99 | gk20a_dbg_fn("done"); | 99 | nvgpu_log_fn(g, "done"); |
100 | return put_idx; | 100 | return put_idx; |
101 | } | 101 | } |
102 | 102 | ||
diff --git a/drivers/gpu/nvgpu/gp10b/therm_gp10b.c b/drivers/gpu/nvgpu/gp10b/therm_gp10b.c index c69bd0bb..4f1de559 100644 --- a/drivers/gpu/nvgpu/gp10b/therm_gp10b.c +++ b/drivers/gpu/nvgpu/gp10b/therm_gp10b.c | |||
@@ -1,7 +1,7 @@ | |||
1 | /* | 1 | /* |
2 | * GP10B Therm | 2 | * GP10B Therm |
3 | * | 3 | * |
4 | * Copyright (c) 2015-2017, NVIDIA CORPORATION. All rights reserved. | 4 | * Copyright (c) 2015-2018, NVIDIA CORPORATION. All rights reserved. |
5 | * | 5 | * |
6 | * Permission is hereby granted, free of charge, to any person obtaining a | 6 | * Permission is hereby granted, free of charge, to any person obtaining a |
7 | * copy of this software and associated documentation files (the "Software"), | 7 | * copy of this software and associated documentation files (the "Software"), |
@@ -33,7 +33,7 @@ int gp10b_init_therm_setup_hw(struct gk20a *g) | |||
33 | { | 33 | { |
34 | u32 v; | 34 | u32 v; |
35 | 35 | ||
36 | gk20a_dbg_fn(""); | 36 | nvgpu_log_fn(g, " "); |
37 | 37 | ||
38 | /* program NV_THERM registers */ | 38 | /* program NV_THERM registers */ |
39 | gk20a_writel(g, therm_use_a_r(), therm_use_a_ext_therm_0_enable_f() | | 39 | gk20a_writel(g, therm_use_a_r(), therm_use_a_ext_therm_0_enable_f() | |
@@ -96,7 +96,7 @@ int gp10b_elcg_init_idle_filters(struct gk20a *g) | |||
96 | u32 active_engine_id = 0; | 96 | u32 active_engine_id = 0; |
97 | struct fifo_gk20a *f = &g->fifo; | 97 | struct fifo_gk20a *f = &g->fifo; |
98 | 98 | ||
99 | gk20a_dbg_fn(""); | 99 | nvgpu_log_fn(g, " "); |
100 | 100 | ||
101 | for (engine_id = 0; engine_id < f->num_engines; engine_id++) { | 101 | for (engine_id = 0; engine_id < f->num_engines; engine_id++) { |
102 | active_engine_id = f->active_engines_list[engine_id]; | 102 | active_engine_id = f->active_engines_list[engine_id]; |
@@ -130,6 +130,6 @@ int gp10b_elcg_init_idle_filters(struct gk20a *g) | |||
130 | idle_filter &= ~therm_hubmmu_idle_filter_value_m(); | 130 | idle_filter &= ~therm_hubmmu_idle_filter_value_m(); |
131 | gk20a_writel(g, therm_hubmmu_idle_filter_r(), idle_filter); | 131 | gk20a_writel(g, therm_hubmmu_idle_filter_r(), idle_filter); |
132 | 132 | ||
133 | gk20a_dbg_fn("done"); | 133 | nvgpu_log_fn(g, "done"); |
134 | return 0; | 134 | return 0; |
135 | } | 135 | } |