path: root/drivers/gpu/nvgpu/gk20a/fecs_trace_gk20a.c
author      Terje Bergstrom <tbergstrom@nvidia.com>             2018-04-18 22:39:46 -0400
committer   mobile promotions <svcmobile_promotions@nvidia.com> 2018-05-09 21:26:04 -0400
commit      dd739fcb039d51606e9a5454ec0aab17bcb01965 (patch)
tree        806ba8575d146367ad1be00086ca0cdae35a6b28 /drivers/gpu/nvgpu/gk20a/fecs_trace_gk20a.c
parent      7e66f2a63d4855e763fa768047dfc32f6f96b771 (diff)
gpu: nvgpu: Remove gk20a_dbg* functions
Switch all logging to nvgpu_log*(). gk20a_dbg* macros are intentionally
left there because of use from other repositories.

Because the new functions do not work without a pointer to struct gk20a,
and piping it just for logging is excessive, some log messages are
deleted.

Change-Id: I00e22e75fe4596a330bb0282ab4774b3639ee31e
Signed-off-by: Terje Bergstrom <tbergstrom@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1704148
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
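The conversion this commit applies can be summarized with a small sketch. It assumes only what the diff below already shows: struct gk20a, nvgpu_log(), the gpu_dbg_* flags, and the old gk20a_dbg() macro; example_log_read_index() is a hypothetical helper used purely for illustration, not part of the driver.

/* Hypothetical helper showing the before/after logging pattern.
 *
 * Before: gk20a_dbg() took only a debug mask, so no GPU instance was
 * needed at the call site:
 *
 *	static void example_log_read_index(int index)
 *	{
 *		gk20a_dbg(gpu_dbg_ctxsw, "set read=%d", index);
 *	}
 *
 * After: nvgpu_log() takes the struct gk20a pointer as its first
 * argument, so the helper must be handed g; call sites with no
 * convenient access to g simply dropped their messages, as noted above.
 */
static void example_log_read_index(struct gk20a *g, int index)
{
	nvgpu_log(g, gpu_dbg_ctxsw, "set read=%d", index);
}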
Diffstat (limited to 'drivers/gpu/nvgpu/gk20a/fecs_trace_gk20a.c')
-rw-r--r--   drivers/gpu/nvgpu/gk20a/fecs_trace_gk20a.c   36
1 file changed, 18 insertions(+), 18 deletions(-)
diff --git a/drivers/gpu/nvgpu/gk20a/fecs_trace_gk20a.c b/drivers/gpu/nvgpu/gk20a/fecs_trace_gk20a.c
index 4fda0d2e..c9d7ea06 100644
--- a/drivers/gpu/nvgpu/gk20a/fecs_trace_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/fecs_trace_gk20a.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2016-2018, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -137,7 +137,7 @@ static int gk20a_fecs_trace_get_write_index(struct gk20a *g)
 
 static int gk20a_fecs_trace_set_read_index(struct gk20a *g, int index)
 {
-	gk20a_dbg(gpu_dbg_ctxsw, "set read=%d", index);
+	nvgpu_log(g, gpu_dbg_ctxsw, "set read=%d", index);
 	return gr_gk20a_elpg_protected_call(g,
 			(gk20a_writel(g, gr_fecs_mailbox1_r(), index), 0));
 }
@@ -148,12 +148,12 @@ void gk20a_fecs_trace_hash_dump(struct gk20a *g)
 	struct gk20a_fecs_trace_hash_ent *ent;
 	struct gk20a_fecs_trace *trace = g->fecs_trace;
 
-	gk20a_dbg(gpu_dbg_ctxsw, "dumping hash table");
+	nvgpu_log(g, gpu_dbg_ctxsw, "dumping hash table");
 
 	nvgpu_mutex_acquire(&trace->hash_lock);
 	hash_for_each(trace->pid_hash_table, bkt, ent, node)
 	{
-		gk20a_dbg(gpu_dbg_ctxsw, " ent=%p bkt=%x context_ptr=%x pid=%d",
+		nvgpu_log(g, gpu_dbg_ctxsw, " ent=%p bkt=%x context_ptr=%x pid=%d",
 			ent, bkt, ent->context_ptr, ent->pid);
 
 	}
@@ -165,7 +165,7 @@ static int gk20a_fecs_trace_hash_add(struct gk20a *g, u32 context_ptr, pid_t pid
 	struct gk20a_fecs_trace_hash_ent *he;
 	struct gk20a_fecs_trace *trace = g->fecs_trace;
 
-	gk20a_dbg(gpu_dbg_fn | gpu_dbg_ctxsw,
+	nvgpu_log(g, gpu_dbg_fn | gpu_dbg_ctxsw,
 		"adding hash entry context_ptr=%x -> pid=%d", context_ptr, pid);
 
 	he = nvgpu_kzalloc(g, sizeof(*he));
@@ -190,7 +190,7 @@ static void gk20a_fecs_trace_hash_del(struct gk20a *g, u32 context_ptr)
 	struct gk20a_fecs_trace_hash_ent *ent;
 	struct gk20a_fecs_trace *trace = g->fecs_trace;
 
-	gk20a_dbg(gpu_dbg_fn | gpu_dbg_ctxsw,
+	nvgpu_log(g, gpu_dbg_fn | gpu_dbg_ctxsw,
 		"freeing hash entry context_ptr=%x", context_ptr);
 
 	nvgpu_mutex_acquire(&trace->hash_lock);
@@ -198,7 +198,7 @@ static void gk20a_fecs_trace_hash_del(struct gk20a *g, u32 context_ptr)
 		context_ptr) {
 		if (ent->context_ptr == context_ptr) {
 			hash_del(&ent->node);
-			gk20a_dbg(gpu_dbg_ctxsw,
+			nvgpu_log(g, gpu_dbg_ctxsw,
 				"freed hash entry=%p context_ptr=%x", ent,
 				ent->context_ptr);
 			nvgpu_kfree(g, ent);
@@ -215,7 +215,7 @@ static void gk20a_fecs_trace_free_hash_table(struct gk20a *g)
 	struct gk20a_fecs_trace_hash_ent *ent;
 	struct gk20a_fecs_trace *trace = g->fecs_trace;
 
-	gk20a_dbg(gpu_dbg_fn | gpu_dbg_ctxsw, "trace=%p", trace);
+	nvgpu_log(g, gpu_dbg_fn | gpu_dbg_ctxsw, "trace=%p", trace);
 
 	nvgpu_mutex_acquire(&trace->hash_lock);
 	hash_for_each_safe(trace->pid_hash_table, bkt, tmp, ent, node) {
@@ -235,7 +235,7 @@ static pid_t gk20a_fecs_trace_find_pid(struct gk20a *g, u32 context_ptr)
 	nvgpu_mutex_acquire(&trace->hash_lock);
 	hash_for_each_possible(trace->pid_hash_table, ent, node, context_ptr) {
 		if (ent->context_ptr == context_ptr) {
-			gk20a_dbg(gpu_dbg_ctxsw,
+			nvgpu_log(g, gpu_dbg_ctxsw,
 				"found context_ptr=%x -> pid=%d",
 				ent->context_ptr, ent->pid);
 			pid = ent->pid;
@@ -265,7 +265,7 @@ static int gk20a_fecs_trace_ring_read(struct gk20a *g, int index)
 	struct gk20a_fecs_trace_record *r = gk20a_fecs_trace_get_record(
 		trace, index);
 
-	gk20a_dbg(gpu_dbg_fn | gpu_dbg_ctxsw,
+	nvgpu_log(g, gpu_dbg_fn | gpu_dbg_ctxsw,
 		"consuming record trace=%p read=%d record=%p", trace, index, r);
 
 	if (unlikely(!gk20a_fecs_trace_is_valid_record(r))) {
@@ -284,7 +284,7 @@ static int gk20a_fecs_trace_ring_read(struct gk20a *g, int index)
 	cur_pid = gk20a_fecs_trace_find_pid(g, r->context_ptr);
 	new_pid = gk20a_fecs_trace_find_pid(g, r->new_context_ptr);
 
-	gk20a_dbg(gpu_dbg_fn | gpu_dbg_ctxsw,
+	nvgpu_log(g, gpu_dbg_fn | gpu_dbg_ctxsw,
 		"context_ptr=%x (pid=%d) new_context_ptr=%x (pid=%d)",
 		r->context_ptr, cur_pid, r->new_context_ptr, new_pid);
 
@@ -298,7 +298,7 @@ static int gk20a_fecs_trace_ring_read(struct gk20a *g, int index)
 		entry.timestamp = gk20a_fecs_trace_record_ts_timestamp_v(r->ts[i]);
 		entry.timestamp <<= GK20A_FECS_TRACE_PTIMER_SHIFT;
 
-		gk20a_dbg(gpu_dbg_ctxsw,
+		nvgpu_log(g, gpu_dbg_ctxsw,
 			"tag=%x timestamp=%llx context_id=%08x new_context_id=%08x",
 			entry.tag, entry.timestamp, r->context_id,
 			r->new_context_id);
@@ -327,7 +327,7 @@ static int gk20a_fecs_trace_ring_read(struct gk20a *g, int index)
 			continue;
 		}
 
-		gk20a_dbg(gpu_dbg_ctxsw, "tag=%x context_id=%x pid=%lld",
+		nvgpu_log(g, gpu_dbg_ctxsw, "tag=%x context_id=%x pid=%lld",
 			entry.tag, entry.context_id, entry.pid);
 
 		if (!entry.context_id)
@@ -368,7 +368,7 @@ int gk20a_fecs_trace_poll(struct gk20a *g)
 	if (!cnt)
 		goto done;
 
-	gk20a_dbg(gpu_dbg_ctxsw,
+	nvgpu_log(g, gpu_dbg_ctxsw,
 		"circular buffer: read=%d (mailbox=%d) write=%d cnt=%d",
 		read, gk20a_fecs_trace_get_read_index(g), write, cnt);
 
@@ -633,7 +633,7 @@ int gk20a_fecs_trace_bind_channel(struct gk20a *g,
 	pid_t pid;
 	u32 aperture;
 
-	gk20a_dbg(gpu_dbg_fn|gpu_dbg_ctxsw,
+	nvgpu_log(g, gpu_dbg_fn|gpu_dbg_ctxsw,
 		"chid=%d context_ptr=%x inst_block=%llx",
 		ch->chid, context_ptr,
 		nvgpu_inst_block_addr(g, &ch->inst_block));
@@ -662,7 +662,7 @@ int gk20a_fecs_trace_bind_channel(struct gk20a *g,
 	lo = u64_lo32(pa);
 	hi = u64_hi32(pa);
 
-	gk20a_dbg(gpu_dbg_ctxsw, "addr_hi=%x addr_lo=%x count=%d", hi,
+	nvgpu_log(g, gpu_dbg_ctxsw, "addr_hi=%x addr_lo=%x count=%d", hi,
 		lo, GK20A_FECS_TRACE_NUM_RECORDS);
 
 	nvgpu_mem_wr(g, mem,
@@ -696,7 +696,7 @@ int gk20a_fecs_trace_unbind_channel(struct gk20a *g, struct channel_gk20a *ch)
 	u32 context_ptr = gk20a_fecs_trace_fecs_context_ptr(g, ch);
 
 	if (g->fecs_trace) {
-		gk20a_dbg(gpu_dbg_fn|gpu_dbg_ctxsw,
+		nvgpu_log(g, gpu_dbg_fn|gpu_dbg_ctxsw,
 			"ch=%p context_ptr=%x", ch, context_ptr);
 
 		if (g->ops.fecs_trace.is_enabled(g)) {
@@ -711,7 +711,7 @@ int gk20a_fecs_trace_unbind_channel(struct gk20a *g, struct channel_gk20a *ch)
 
 int gk20a_fecs_trace_reset(struct gk20a *g)
 {
-	gk20a_dbg(gpu_dbg_fn|gpu_dbg_ctxsw, "");
+	nvgpu_log(g, gpu_dbg_fn|gpu_dbg_ctxsw, " ");
 
 	if (!g->ops.fecs_trace.is_enabled(g))
 		return 0;