author     Terje Bergstrom <tbergstrom@nvidia.com>              2017-03-30 10:44:03 -0400
committer  mobile promotions <svcmobile_promotions@nvidia.com>  2017-04-10 22:04:19 -0400
commit     3ba374a5d94f8c2067731155afaf79f03e6c390c (patch)
tree       d8a2bd0d52b1e8862510aedeb7529944c0b7e28e /drivers/gpu/nvgpu/gk20a/pmu_gk20a.c
parent     2be51206af88aba6662cdd9de5bd6c18989bbcbd (diff)
gpu: nvgpu: gk20a: Use new error macro
gk20a_err() and gk20a_warn() require a struct device pointer, which is not
portable across operating systems. The new nvgpu_err() and nvgpu_warn()
macros take a struct gk20a pointer instead. Convert the code to use the more
portable macros.

JIRA NVGPU-16

Change-Id: Ia51f36d94c5ce57a5a0ab83b3c83a6bce09e2d5c
Signed-off-by: Terje Bergstrom <tbergstrom@nvidia.com>
Reviewed-on: http://git-master/r/1331694
Reviewed-by: svccoveritychecker <svccoveritychecker@nvidia.com>
Reviewed-by: Alex Waterman <alexw@nvidia.com>
GVS: Gerrit_Virtual_Submit
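The conversion is mechanical: every call that previously derived a struct device pointer with dev_from_gk20a(g) (or used g->dev) now passes the struct gk20a pointer directly. A minimal before/after sketch, using logging calls taken from the hunks below (the surrounding function bodies are omitted):

    /* before: gk20a_err()/gk20a_warn() need a Linux struct device * */
    gk20a_err(dev_from_gk20a(g), "Falcon mem scrubbing timeout");
    gk20a_warn(dev_from_gk20a(g),
            "fail to generate mutex token: val 0x%08x", owner);

    /* after: nvgpu_err()/nvgpu_warn() take the struct gk20a * itself */
    nvgpu_err(g, "Falcon mem scrubbing timeout");
    nvgpu_warn(g, "fail to generate mutex token: val 0x%08x", owner);

nvgpu_err() and nvgpu_warn() come from <nvgpu/log.h>, which is why the first hunk adds that include.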
Diffstat (limited to 'drivers/gpu/nvgpu/gk20a/pmu_gk20a.c')
-rw-r--r--  drivers/gpu/nvgpu/gk20a/pmu_gk20a.c  208
1 file changed, 89 insertions(+), 119 deletions(-)
diff --git a/drivers/gpu/nvgpu/gk20a/pmu_gk20a.c b/drivers/gpu/nvgpu/gk20a/pmu_gk20a.c
index 547ba924..38b8da9c 100644
--- a/drivers/gpu/nvgpu/gk20a/pmu_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/pmu_gk20a.c
@@ -27,6 +27,7 @@
 #include <nvgpu/timers.h>
 #include <nvgpu/kmem.h>
 #include <nvgpu/dma.h>
+#include <nvgpu/log.h>
 
 #include "gk20a.h"
 #include "gr_gk20a.h"
@@ -314,7 +315,7 @@ static void printtrace(struct pmu_gk20a *pmu)
         trace = (char *)tracebuffer;
         trace1 = (u32 *)tracebuffer;
 
-        gk20a_err(dev_from_gk20a(g), "Dump pmutrace");
+        nvgpu_err(g, "Dump pmutrace");
         for (i = 0; i < GK20A_PMU_TRACE_BUFSIZE; i += 0x40) {
                 for (j = 0; j < 0x40; j++)
                         if (trace1[(i / 4) + j])
@@ -335,7 +336,7 @@ static void printtrace(struct pmu_gk20a *pmu)
                         m += k + 2;
                 }
                 scnprintf((buf + count), 0x40, "%s", (trace+i+20+m));
-                gk20a_err(dev_from_gk20a(g), "%s", buf);
+                nvgpu_err(g, "%s", buf);
         }
         nvgpu_kfree(g, tracebuffer);
 }
@@ -2184,8 +2185,7 @@ int gk20a_init_pmu(struct pmu_gk20a *pmu)
                         get_pmu_sequence_out_alloc_ptr_v0;
                 break;
         default:
-                gk20a_err(dev_from_gk20a(gk20a_from_pmu(pmu)),
-                        "PMU code version not supported version: %d\n",
+                nvgpu_err(g, "PMU code version not supported version: %d\n",
                         pmu->desc->app_version);
                 err = -EINVAL;
                 goto fail_pmu_seq;
@@ -2217,14 +2217,12 @@ void pmu_copy_from_dmem(struct pmu_gk20a *pmu,
         u32 *dst_u32 = (u32*)dst;
 
         if (size == 0) {
-                gk20a_err(dev_from_gk20a(g),
-                        "size is zero");
+                nvgpu_err(g, "size is zero");
                 return;
         }
 
         if (src & 0x3) {
-                gk20a_err(dev_from_gk20a(g),
-                        "src (0x%08x) not 4-byte aligned", src);
+                nvgpu_err(g, "src (0x%08x) not 4-byte aligned", src);
                 return;
         }
 
@@ -2263,14 +2261,12 @@ void pmu_copy_to_dmem(struct pmu_gk20a *pmu,
         u32 *src_u32 = (u32*)src;
 
         if (size == 0) {
-                gk20a_err(dev_from_gk20a(g),
-                        "size is zero");
+                nvgpu_err(g, "size is zero");
                 return;
         }
 
         if (dst & 0x3) {
-                gk20a_err(dev_from_gk20a(g),
-                        "dst (0x%08x) not 4-byte aligned", dst);
+                nvgpu_err(g, "dst (0x%08x) not 4-byte aligned", dst);
                 return;
         }
 
@@ -2300,8 +2296,7 @@ void pmu_copy_to_dmem(struct pmu_gk20a *pmu,
         data = gk20a_readl(g, pwr_falcon_dmemc_r(port)) & addr_mask;
         size = ALIGN(size, 4);
         if (data != ((dst + size) & addr_mask)) {
-                gk20a_err(dev_from_gk20a(g),
-                        "copy failed. bytes written %d, expected %d",
+                nvgpu_err(g, "copy failed. bytes written %d, expected %d",
                         data - dst, size);
         }
         nvgpu_mutex_release(&pmu->pmu_copy_lock);
@@ -2432,7 +2427,7 @@ int pmu_enable_hw(struct pmu_gk20a *pmu, bool enable)
                 } while (!nvgpu_timeout_expired(&timeout));
 
                 g->ops.mc.disable(g, mc_enable_pwr_enabled_f());
-                gk20a_err(dev_from_gk20a(g), "Falcon mem scrubbing timeout");
+                nvgpu_err(g, "Falcon mem scrubbing timeout");
 
                 return -ETIMEDOUT;
         } else {
@@ -2615,8 +2610,7 @@ static int pmu_seq_acquire(struct pmu_gk20a *pmu,
         index = find_first_zero_bit(pmu->pmu_seq_tbl,
                         sizeof(pmu->pmu_seq_tbl));
         if (index >= sizeof(pmu->pmu_seq_tbl)) {
-                gk20a_err(dev_from_gk20a(g),
-                        "no free sequence available");
+                nvgpu_err(g, "no free sequence available");
                 nvgpu_mutex_release(&pmu->pmu_seq_lock);
                 return -EAGAIN;
         }
@@ -2787,7 +2781,7 @@ int pmu_mutex_acquire(struct pmu_gk20a *pmu, u32 id, u32 *token)
                         gk20a_readl(g, pwr_pmu_mutex_id_r()));
                 if (data == pwr_pmu_mutex_id_value_init_v() ||
                     data == pwr_pmu_mutex_id_value_not_avail_v()) {
-                        gk20a_warn(dev_from_gk20a(g),
+                        nvgpu_warn(g,
                                 "fail to generate mutex token: val 0x%08x",
                                 owner);
                         usleep_range(20, 40);
@@ -2844,8 +2838,7 @@ int pmu_mutex_release(struct pmu_gk20a *pmu, u32 id, u32 *token)
                 gk20a_readl(g, pwr_pmu_mutex_r(mutex->index)));
 
         if (*token != owner) {
-                gk20a_err(dev_from_gk20a(g),
-                        "requester 0x%08x NOT match owner 0x%08x",
+                nvgpu_err(g, "requester 0x%08x NOT match owner 0x%08x",
                         *token, owner);
                 return -EINVAL;
         }
@@ -2953,8 +2946,7 @@ static int pmu_queue_push(struct pmu_gk20a *pmu,
         gk20a_dbg_fn("");
 
         if (!queue->opened && queue->oflag == OFLAG_WRITE){
-                gk20a_err(dev_from_gk20a(gk20a_from_pmu(pmu)),
-                        "queue not opened for write");
+                nvgpu_err(gk20a_from_pmu(pmu), "queue not opened for write");
                 return -EINVAL;
         }
 
@@ -2972,8 +2964,7 @@ static int pmu_queue_pop(struct pmu_gk20a *pmu,
         *bytes_read = 0;
 
         if (!queue->opened && queue->oflag == OFLAG_READ){
-                gk20a_err(dev_from_gk20a(gk20a_from_pmu(pmu)),
-                        "queue not opened for read");
+                nvgpu_err(gk20a_from_pmu(pmu), "queue not opened for read");
                 return -EINVAL;
         }
 
@@ -2989,7 +2980,7 @@ static int pmu_queue_pop(struct pmu_gk20a *pmu,
                 used = queue->offset + queue->size - tail;
 
         if (size > used) {
-                gk20a_warn(dev_from_gk20a(gk20a_from_pmu(pmu)),
+                nvgpu_warn(gk20a_from_pmu(pmu),
                         "queue size smaller than request read");
                 size = used;
         }
@@ -3008,8 +2999,7 @@ static void pmu_queue_rewind(struct pmu_gk20a *pmu,
         gk20a_dbg_fn("");
 
         if (!queue->opened) {
-                gk20a_err(dev_from_gk20a(gk20a_from_pmu(pmu)),
-                        "queue not opened");
+                nvgpu_err(gk20a_from_pmu(pmu), "queue not opened");
                 return;
         }
 
@@ -3132,7 +3122,6 @@ static int gk20a_prepare_ucode(struct gk20a *g)
 {
         struct pmu_gk20a *pmu = &g->pmu;
         int err = 0;
-        struct device *d = dev_from_gk20a(g);
         struct mm_gk20a *mm = &g->mm;
         struct vm_gk20a *vm = &mm->pmu.vm;
 
@@ -3141,7 +3130,7 @@ static int gk20a_prepare_ucode(struct gk20a *g)
 
         pmu->fw = nvgpu_request_firmware(g, GK20A_PMU_UCODE_IMAGE, 0);
         if (!pmu->fw) {
-                gk20a_err(d, "failed to load pmu ucode!!");
+                nvgpu_err(g, "failed to load pmu ucode!!");
                 return err;
         }
 
@@ -3173,7 +3162,6 @@ static int gk20a_init_pmu_setup_sw(struct gk20a *g)
         struct pmu_gk20a *pmu = &g->pmu;
         struct mm_gk20a *mm = &g->mm;
         struct vm_gk20a *vm = &mm->pmu.vm;
-        struct device *d = dev_from_gk20a(g);
         unsigned int i;
         int err = 0;
         u8 *ptr;
@@ -3228,7 +3216,7 @@ static int gk20a_init_pmu_setup_sw(struct gk20a *g)
         err = nvgpu_dma_alloc_map_sys(vm, GK20A_PMU_SEQ_BUF_SIZE,
                         &pmu->seq_buf);
         if (err) {
-                gk20a_err(d, "failed to allocate memory\n");
+                nvgpu_err(g, "failed to allocate memory\n");
                 goto err_free_seq;
         }
 
@@ -3245,7 +3233,7 @@ static int gk20a_init_pmu_setup_sw(struct gk20a *g)
         err = nvgpu_dma_alloc_map(vm, GK20A_PMU_TRACE_BUFSIZE,
                         &pmu->trace_buf);
         if (err) {
-                gk20a_err(d, "failed to allocate pmu trace buffer\n");
+                nvgpu_err(g, "failed to allocate pmu trace buffer\n");
                 goto err_free_seq_buf;
         }
 
@@ -3275,7 +3263,7 @@ static void pmu_handle_pg_buf_config_msg(struct gk20a *g, struct pmu_msg *msg,
 
         gk20a_dbg_pmu("reply PMU_PG_CMD_ID_ENG_BUF_LOAD PMU_PGENG_GR_BUFFER_IDX_FECS");
         if (status != 0) {
-                gk20a_err(dev_from_gk20a(g), "PGENG cmd aborted");
+                nvgpu_err(g, "PGENG cmd aborted");
                 /* TBD: disable ELPG */
                 return;
         }
@@ -3283,7 +3271,7 @@ static void pmu_handle_pg_buf_config_msg(struct gk20a *g, struct pmu_msg *msg,
         pmu->buf_loaded = (eng_buf_stat->status == PMU_PG_MSG_ENG_BUF_LOADED);
         if ((!pmu->buf_loaded) &&
                 (pmu->pmu_state == PMU_STATE_LOADING_PG_BUF))
-                gk20a_err(dev_from_gk20a(g), "failed to load PGENG buffer");
+                nvgpu_err(g, "failed to load PGENG buffer");
         else {
                 schedule_work(&pmu->pg_init);
         }
@@ -3571,7 +3559,7 @@ static void pmu_handle_pg_elpg_msg(struct gk20a *g, struct pmu_msg *msg,
         gk20a_dbg_fn("");
 
         if (status != 0) {
-                gk20a_err(dev_from_gk20a(g), "ELPG cmd aborted");
+                nvgpu_err(g, "ELPG cmd aborted");
                 /* TBD: disable ELPG */
                 return;
         }
@@ -3615,7 +3603,7 @@ static void pmu_handle_pg_elpg_msg(struct gk20a *g, struct pmu_msg *msg,
                 }
                 break;
         default:
-                gk20a_err(dev_from_gk20a(g),
+                nvgpu_err(g,
                         "unsupported ELPG message : 0x%04x", elpg_msg->msg);
         }
 
@@ -3630,7 +3618,7 @@ static void pmu_handle_pg_stat_msg(struct gk20a *g, struct pmu_msg *msg,
         gk20a_dbg_fn("");
 
         if (status != 0) {
-                gk20a_err(dev_from_gk20a(g), "ELPG cmd aborted");
+                nvgpu_err(g, "ELPG cmd aborted");
                 /* TBD: disable ELPG */
                 return;
         }
@@ -3769,7 +3757,7 @@ static u8 get_perfmon_id(struct pmu_gk20a *pmu)
                 break;
 #endif
         default:
-                gk20a_err(g->dev, "no support for %x", ver);
+                nvgpu_err(g, "no support for %x", ver);
                 BUG();
         }
 
@@ -3837,8 +3825,7 @@ static int pmu_init_perfmon(struct pmu_gk20a *pmu)
                 pmu->sample_buffer = nvgpu_alloc(&pmu->dmem,
                                                   2 * sizeof(u16));
         if (!pmu->sample_buffer) {
-                gk20a_err(dev_from_gk20a(g),
-                        "failed to allocate perfmon sample buffer");
+                nvgpu_err(g, "failed to allocate perfmon sample buffer");
                 return -ENOMEM;
         }
 
@@ -3893,8 +3880,7 @@ static int pmu_process_init_msg(struct pmu_gk20a *pmu,
         pmu_copy_from_dmem(pmu, tail,
                 (u8 *)&msg->hdr, PMU_MSG_HDR_SIZE, 0);
         if (msg->hdr.unit_id != PMU_UNIT_INIT) {
-                gk20a_err(dev_from_gk20a(g),
-                        "expecting init msg");
+                nvgpu_err(g, "expecting init msg");
                 return -EINVAL;
         }
 
@@ -3902,8 +3888,7 @@ static int pmu_process_init_msg(struct pmu_gk20a *pmu,
                 (u8 *)&msg->msg, msg->hdr.size - PMU_MSG_HDR_SIZE, 0);
 
         if (msg->msg.init.msg_type != PMU_INIT_MSG_TYPE_PMU_INIT) {
-                gk20a_err(dev_from_gk20a(g),
-                        "expecting init msg");
+                nvgpu_err(g, "expecting init msg");
                 return -EINVAL;
         }
 
@@ -3970,8 +3955,7 @@ static bool pmu_read_message(struct pmu_gk20a *pmu, struct pmu_queue *queue,
 
         err = pmu_queue_open_read(pmu, queue);
         if (err) {
-                gk20a_err(dev_from_gk20a(g),
-                        "fail to open queue %d for read", queue->id);
+                nvgpu_err(g, "fail to open queue %d for read", queue->id);
                 *status = err;
                 return false;
         }
@@ -3979,8 +3963,7 @@ static bool pmu_read_message(struct pmu_gk20a *pmu, struct pmu_queue *queue,
         err = pmu_queue_pop(pmu, queue, &msg->hdr,
                         PMU_MSG_HDR_SIZE, &bytes_read);
         if (err || bytes_read != PMU_MSG_HDR_SIZE) {
-                gk20a_err(dev_from_gk20a(g),
-                        "fail to read msg from queue %d", queue->id);
+                nvgpu_err(g, "fail to read msg from queue %d", queue->id);
                 *status = err | -EINVAL;
                 goto clean_up;
         }
@@ -3991,7 +3974,7 @@ static bool pmu_read_message(struct pmu_gk20a *pmu, struct pmu_queue *queue,
                 err = pmu_queue_pop(pmu, queue, &msg->hdr,
                                 PMU_MSG_HDR_SIZE, &bytes_read);
                 if (err || bytes_read != PMU_MSG_HDR_SIZE) {
-                        gk20a_err(dev_from_gk20a(g),
+                        nvgpu_err(g,
                                 "fail to read msg from queue %d", queue->id);
                         *status = err | -EINVAL;
                         goto clean_up;
@@ -3999,8 +3982,7 @@ static bool pmu_read_message(struct pmu_gk20a *pmu, struct pmu_queue *queue,
         }
 
         if (!PMU_UNIT_ID_IS_VALID(msg->hdr.unit_id)) {
-                gk20a_err(dev_from_gk20a(g),
-                        "read invalid unit_id %d from queue %d",
+                nvgpu_err(g, "read invalid unit_id %d from queue %d",
                         msg->hdr.unit_id, queue->id);
                 *status = -EINVAL;
                 goto clean_up;
@@ -4011,7 +3993,7 @@ static bool pmu_read_message(struct pmu_gk20a *pmu, struct pmu_queue *queue,
                 err = pmu_queue_pop(pmu, queue, &msg->msg,
                         read_size, &bytes_read);
                 if (err || bytes_read != read_size) {
-                        gk20a_err(dev_from_gk20a(g),
+                        nvgpu_err(g,
                                 "fail to read msg from queue %d", queue->id);
                         *status = err;
                         goto clean_up;
@@ -4020,8 +4002,7 @@ static bool pmu_read_message(struct pmu_gk20a *pmu, struct pmu_queue *queue,
 
         err = pmu_queue_close(pmu, queue, true);
         if (err) {
-                gk20a_err(dev_from_gk20a(g),
-                        "fail to close queue %d", queue->id);
+                nvgpu_err(g, "fail to close queue %d", queue->id);
                 *status = err;
                 return false;
         }
@@ -4031,8 +4012,7 @@ static bool pmu_read_message(struct pmu_gk20a *pmu, struct pmu_queue *queue,
 clean_up:
         err = pmu_queue_close(pmu, queue, false);
         if (err)
-                gk20a_err(dev_from_gk20a(g),
-                        "fail to close queue %d", queue->id);
+                nvgpu_err(g, "fail to close queue %d", queue->id);
         return false;
 }
 
@@ -4049,23 +4029,20 @@ static int pmu_response_handle(struct pmu_gk20a *pmu,
         seq = &pmu->seq[msg->hdr.seq_id];
         if (seq->state != PMU_SEQ_STATE_USED &&
             seq->state != PMU_SEQ_STATE_CANCELLED) {
-                gk20a_err(dev_from_gk20a(g),
-                        "msg for an unknown sequence %d", seq->id);
+                nvgpu_err(g, "msg for an unknown sequence %d", seq->id);
                 return -EINVAL;
         }
 
         if (msg->hdr.unit_id == PMU_UNIT_RC &&
             msg->msg.rc.msg_type == PMU_RC_MSG_TYPE_UNHANDLED_CMD) {
-                gk20a_err(dev_from_gk20a(g),
-                        "unhandled cmd: seq %d", seq->id);
+                nvgpu_err(g, "unhandled cmd: seq %d", seq->id);
         }
         else if (seq->state != PMU_SEQ_STATE_CANCELLED) {
                 if (seq->msg) {
                         if (seq->msg->hdr.size >= msg->hdr.size) {
                                 memcpy(seq->msg, msg, msg->hdr.size);
                         } else {
-                                gk20a_err(dev_from_gk20a(g),
-                                        "sequence %d msg buffer too small",
+                                nvgpu_err(g, "sequence %d msg buffer too small",
                                         seq->id);
                         }
                 }
@@ -4158,7 +4135,7 @@ void gk20a_pmu_save_zbc(struct gk20a *g, u32 entries)
         pmu_wait_message_cond(pmu, gk20a_get_gr_idle_timeout(g),
                               &pmu->zbc_save_done, 1);
         if (!pmu->zbc_save_done)
-                gk20a_err(dev_from_gk20a(g), "ZBC save timeout");
+                nvgpu_err(g, "ZBC save timeout");
 }
 
 static int pmu_perfmon_start_sampling(struct pmu_gk20a *pmu)
@@ -4451,118 +4428,118 @@ void pmu_dump_falcon_stats(struct pmu_gk20a *pmu)
         struct gk20a *g = gk20a_from_pmu(pmu);
         unsigned int i;
 
-        gk20a_err(dev_from_gk20a(g), "pwr_falcon_os_r : %d",
+        nvgpu_err(g, "pwr_falcon_os_r : %d",
                 gk20a_readl(g, pwr_falcon_os_r()));
-        gk20a_err(dev_from_gk20a(g), "pwr_falcon_cpuctl_r : 0x%x",
+        nvgpu_err(g, "pwr_falcon_cpuctl_r : 0x%x",
                 gk20a_readl(g, pwr_falcon_cpuctl_r()));
-        gk20a_err(dev_from_gk20a(g), "pwr_falcon_idlestate_r : 0x%x",
+        nvgpu_err(g, "pwr_falcon_idlestate_r : 0x%x",
                 gk20a_readl(g, pwr_falcon_idlestate_r()));
-        gk20a_err(dev_from_gk20a(g), "pwr_falcon_mailbox0_r : 0x%x",
+        nvgpu_err(g, "pwr_falcon_mailbox0_r : 0x%x",
                 gk20a_readl(g, pwr_falcon_mailbox0_r()));
-        gk20a_err(dev_from_gk20a(g), "pwr_falcon_mailbox1_r : 0x%x",
+        nvgpu_err(g, "pwr_falcon_mailbox1_r : 0x%x",
                 gk20a_readl(g, pwr_falcon_mailbox1_r()));
-        gk20a_err(dev_from_gk20a(g), "pwr_falcon_irqstat_r : 0x%x",
+        nvgpu_err(g, "pwr_falcon_irqstat_r : 0x%x",
                 gk20a_readl(g, pwr_falcon_irqstat_r()));
-        gk20a_err(dev_from_gk20a(g), "pwr_falcon_irqmode_r : 0x%x",
+        nvgpu_err(g, "pwr_falcon_irqmode_r : 0x%x",
                 gk20a_readl(g, pwr_falcon_irqmode_r()));
-        gk20a_err(dev_from_gk20a(g), "pwr_falcon_irqmask_r : 0x%x",
+        nvgpu_err(g, "pwr_falcon_irqmask_r : 0x%x",
                 gk20a_readl(g, pwr_falcon_irqmask_r()));
-        gk20a_err(dev_from_gk20a(g), "pwr_falcon_irqdest_r : 0x%x",
+        nvgpu_err(g, "pwr_falcon_irqdest_r : 0x%x",
                 gk20a_readl(g, pwr_falcon_irqdest_r()));
 
         for (i = 0; i < pwr_pmu_mailbox__size_1_v(); i++)
-                gk20a_err(dev_from_gk20a(g), "pwr_pmu_mailbox_r(%d) : 0x%x",
+                nvgpu_err(g, "pwr_pmu_mailbox_r(%d) : 0x%x",
                         i, gk20a_readl(g, pwr_pmu_mailbox_r(i)));
 
         for (i = 0; i < pwr_pmu_debug__size_1_v(); i++)
-                gk20a_err(dev_from_gk20a(g), "pwr_pmu_debug_r(%d) : 0x%x",
+                nvgpu_err(g, "pwr_pmu_debug_r(%d) : 0x%x",
                         i, gk20a_readl(g, pwr_pmu_debug_r(i)));
 
         for (i = 0; i < 6/*NV_PPWR_FALCON_ICD_IDX_RSTAT__SIZE_1*/; i++) {
                 gk20a_writel(g, pwr_pmu_falcon_icd_cmd_r(),
                         pwr_pmu_falcon_icd_cmd_opc_rstat_f() |
                         pwr_pmu_falcon_icd_cmd_idx_f(i));
-                gk20a_err(dev_from_gk20a(g), "pmu_rstat (%d) : 0x%x",
+                nvgpu_err(g, "pmu_rstat (%d) : 0x%x",
                         i, gk20a_readl(g, pwr_pmu_falcon_icd_rdata_r()));
         }
 
         i = gk20a_readl(g, pwr_pmu_bar0_error_status_r());
-        gk20a_err(dev_from_gk20a(g), "pwr_pmu_bar0_error_status_r : 0x%x", i);
+        nvgpu_err(g, "pwr_pmu_bar0_error_status_r : 0x%x", i);
         if (i != 0) {
-                gk20a_err(dev_from_gk20a(g), "pwr_pmu_bar0_addr_r : 0x%x",
+                nvgpu_err(g, "pwr_pmu_bar0_addr_r : 0x%x",
                         gk20a_readl(g, pwr_pmu_bar0_addr_r()));
-                gk20a_err(dev_from_gk20a(g), "pwr_pmu_bar0_data_r : 0x%x",
+                nvgpu_err(g, "pwr_pmu_bar0_data_r : 0x%x",
                         gk20a_readl(g, pwr_pmu_bar0_data_r()));
-                gk20a_err(dev_from_gk20a(g), "pwr_pmu_bar0_timeout_r : 0x%x",
+                nvgpu_err(g, "pwr_pmu_bar0_timeout_r : 0x%x",
                         gk20a_readl(g, pwr_pmu_bar0_timeout_r()));
-                gk20a_err(dev_from_gk20a(g), "pwr_pmu_bar0_ctl_r : 0x%x",
+                nvgpu_err(g, "pwr_pmu_bar0_ctl_r : 0x%x",
                         gk20a_readl(g, pwr_pmu_bar0_ctl_r()));
         }
 
         i = gk20a_readl(g, pwr_pmu_bar0_fecs_error_r());
-        gk20a_err(dev_from_gk20a(g), "pwr_pmu_bar0_fecs_error_r : 0x%x", i);
+        nvgpu_err(g, "pwr_pmu_bar0_fecs_error_r : 0x%x", i);
 
         i = gk20a_readl(g, pwr_falcon_exterrstat_r());
-        gk20a_err(dev_from_gk20a(g), "pwr_falcon_exterrstat_r : 0x%x", i);
+        nvgpu_err(g, "pwr_falcon_exterrstat_r : 0x%x", i);
         if (pwr_falcon_exterrstat_valid_v(i) ==
                         pwr_falcon_exterrstat_valid_true_v()) {
-                gk20a_err(dev_from_gk20a(g), "pwr_falcon_exterraddr_r : 0x%x",
+                nvgpu_err(g, "pwr_falcon_exterraddr_r : 0x%x",
                         gk20a_readl(g, pwr_falcon_exterraddr_r()));
-                gk20a_err(dev_from_gk20a(g), "pmc_enable : 0x%x",
+                nvgpu_err(g, "pmc_enable : 0x%x",
                         gk20a_readl(g, mc_enable_r()));
         }
 
-        gk20a_err(dev_from_gk20a(g), "pwr_falcon_engctl_r : 0x%x",
+        nvgpu_err(g, "pwr_falcon_engctl_r : 0x%x",
                 gk20a_readl(g, pwr_falcon_engctl_r()));
-        gk20a_err(dev_from_gk20a(g), "pwr_falcon_curctx_r : 0x%x",
+        nvgpu_err(g, "pwr_falcon_curctx_r : 0x%x",
                 gk20a_readl(g, pwr_falcon_curctx_r()));
-        gk20a_err(dev_from_gk20a(g), "pwr_falcon_nxtctx_r : 0x%x",
+        nvgpu_err(g, "pwr_falcon_nxtctx_r : 0x%x",
                 gk20a_readl(g, pwr_falcon_nxtctx_r()));
 
         gk20a_writel(g, pwr_pmu_falcon_icd_cmd_r(),
                 pwr_pmu_falcon_icd_cmd_opc_rreg_f() |
                 pwr_pmu_falcon_icd_cmd_idx_f(PMU_FALCON_REG_IMB));
-        gk20a_err(dev_from_gk20a(g), "PMU_FALCON_REG_IMB : 0x%x",
+        nvgpu_err(g, "PMU_FALCON_REG_IMB : 0x%x",
                 gk20a_readl(g, pwr_pmu_falcon_icd_rdata_r()));
 
         gk20a_writel(g, pwr_pmu_falcon_icd_cmd_r(),
                 pwr_pmu_falcon_icd_cmd_opc_rreg_f() |
                 pwr_pmu_falcon_icd_cmd_idx_f(PMU_FALCON_REG_DMB));
-        gk20a_err(dev_from_gk20a(g), "PMU_FALCON_REG_DMB : 0x%x",
+        nvgpu_err(g, "PMU_FALCON_REG_DMB : 0x%x",
                 gk20a_readl(g, pwr_pmu_falcon_icd_rdata_r()));
 
         gk20a_writel(g, pwr_pmu_falcon_icd_cmd_r(),
                 pwr_pmu_falcon_icd_cmd_opc_rreg_f() |
                 pwr_pmu_falcon_icd_cmd_idx_f(PMU_FALCON_REG_CSW));
-        gk20a_err(dev_from_gk20a(g), "PMU_FALCON_REG_CSW : 0x%x",
+        nvgpu_err(g, "PMU_FALCON_REG_CSW : 0x%x",
                 gk20a_readl(g, pwr_pmu_falcon_icd_rdata_r()));
 
         gk20a_writel(g, pwr_pmu_falcon_icd_cmd_r(),
                 pwr_pmu_falcon_icd_cmd_opc_rreg_f() |
                 pwr_pmu_falcon_icd_cmd_idx_f(PMU_FALCON_REG_CTX));
-        gk20a_err(dev_from_gk20a(g), "PMU_FALCON_REG_CTX : 0x%x",
+        nvgpu_err(g, "PMU_FALCON_REG_CTX : 0x%x",
                 gk20a_readl(g, pwr_pmu_falcon_icd_rdata_r()));
 
         gk20a_writel(g, pwr_pmu_falcon_icd_cmd_r(),
                 pwr_pmu_falcon_icd_cmd_opc_rreg_f() |
                 pwr_pmu_falcon_icd_cmd_idx_f(PMU_FALCON_REG_EXCI));
-        gk20a_err(dev_from_gk20a(g), "PMU_FALCON_REG_EXCI : 0x%x",
+        nvgpu_err(g, "PMU_FALCON_REG_EXCI : 0x%x",
                 gk20a_readl(g, pwr_pmu_falcon_icd_rdata_r()));
 
         for (i = 0; i < 4; i++) {
                 gk20a_writel(g, pwr_pmu_falcon_icd_cmd_r(),
                         pwr_pmu_falcon_icd_cmd_opc_rreg_f() |
                         pwr_pmu_falcon_icd_cmd_idx_f(PMU_FALCON_REG_PC));
-                gk20a_err(dev_from_gk20a(g), "PMU_FALCON_REG_PC : 0x%x",
+                nvgpu_err(g, "PMU_FALCON_REG_PC : 0x%x",
                         gk20a_readl(g, pwr_pmu_falcon_icd_rdata_r()));
 
                 gk20a_writel(g, pwr_pmu_falcon_icd_cmd_r(),
                         pwr_pmu_falcon_icd_cmd_opc_rreg_f() |
                         pwr_pmu_falcon_icd_cmd_idx_f(PMU_FALCON_REG_SP));
-                gk20a_err(dev_from_gk20a(g), "PMU_FALCON_REG_SP : 0x%x",
+                nvgpu_err(g, "PMU_FALCON_REG_SP : 0x%x",
                         gk20a_readl(g, pwr_pmu_falcon_icd_rdata_r()));
         }
-        gk20a_err(dev_from_gk20a(g), "elpg stat: %d\n",
+        nvgpu_err(g, "elpg stat: %d\n",
                 pmu->elpg_stat);
 
         /* PMU may crash due to FECS crash. Dump FECS status */
@@ -4600,8 +4577,7 @@ void gk20a_pmu_isr(struct gk20a *g)
         }
 
         if (intr & pwr_falcon_irqstat_halt_true_f()) {
-                gk20a_err(dev_from_gk20a(g),
-                        "pmu halt intr not implemented");
+                nvgpu_err(g, "pmu halt intr not implemented");
                 pmu_dump_falcon_stats(pmu);
                 if (gk20a_readl(g, pwr_pmu_mailbox_r
                                 (PMU_MODE_MISMATCH_STATUS_MAILBOX_R)) ==
@@ -4610,7 +4586,7 @@ void gk20a_pmu_isr(struct gk20a *g)
                                 g->ops.pmu.dump_secure_fuses(g);
         }
         if (intr & pwr_falcon_irqstat_exterr_true_f()) {
-                gk20a_err(dev_from_gk20a(g),
+                nvgpu_err(g,
                         "pmu exterr intr not implemented. Clearing interrupt.");
                 pmu_dump_falcon_stats(pmu);
 
@@ -4692,7 +4668,7 @@ static bool pmu_validate_cmd(struct pmu_gk20a *pmu, struct pmu_cmd *cmd,
         return true;
 
 invalid_cmd:
-        gk20a_err(dev_from_gk20a(g), "invalid pmu cmd :\n"
+        nvgpu_err(g, "invalid pmu cmd :\n"
                 "queue_id=%d,\n"
                 "cmd_size=%d, cmd_unit_id=%d, msg=%p, msg_size=%d,\n"
                 "payload in=%p, in_size=%d, in_offset=%d,\n"
@@ -4736,8 +4712,7 @@ static int pmu_write_cmd(struct pmu_gk20a *pmu, struct pmu_cmd *cmd,
 
 clean_up:
         if (err)
-                gk20a_err(dev_from_gk20a(g),
-                        "fail to write cmd to queue %d", queue_id);
+                nvgpu_err(g, "fail to write cmd to queue %d", queue_id);
         else
                 gk20a_dbg_fn("done");
 
@@ -4762,7 +4737,7 @@ int gk20a_pmu_vidmem_surface_alloc(struct gk20a *g, struct nvgpu_mem *mem,
 
         err = nvgpu_dma_alloc_map_vid(vm, size, mem);
         if (err) {
-                gk20a_err(g->dev, "memory allocation failed");
+                nvgpu_err(g, "memory allocation failed");
                 return -ENOMEM;
         }
 
@@ -4778,7 +4753,7 @@ int gk20a_pmu_sysmem_surface_alloc(struct gk20a *g, struct nvgpu_mem *mem,
 
         err = nvgpu_dma_alloc_map_sys(vm, size, mem);
         if (err) {
-                gk20a_err(g->dev, "failed to allocate memory\n");
+                nvgpu_err(g, "failed to allocate memory\n");
                 return -ENOMEM;
         }
 
@@ -4806,14 +4781,11 @@ int gk20a_pmu_cmd_post(struct gk20a *g, struct pmu_cmd *cmd,
 
         if ((!cmd) || (!seq_desc) || (!pmu->pmu_ready)) {
                 if (!cmd)
-                        gk20a_warn(dev_from_gk20a(g),
-                                "%s(): PMU cmd buffer is NULL", __func__);
+                        nvgpu_warn(g, "%s(): PMU cmd buffer is NULL", __func__);
                 else if (!seq_desc)
-                        gk20a_warn(dev_from_gk20a(g),
-                                "%s(): Seq descriptor is NULL", __func__);
+                        nvgpu_warn(g, "%s(): Seq descriptor is NULL", __func__);
                 else
-                        gk20a_warn(dev_from_gk20a(g),
-                                "%s(): PMU is not ready", __func__);
+                        nvgpu_warn(g, "%s(): PMU is not ready", __func__);
 
                 WARN_ON(1);
                 return -EINVAL;
@@ -5044,9 +5016,9 @@ int gk20a_pmu_enable_elpg(struct gk20a *g)
 
         /* something is not right if we end up in following code path */
         if (unlikely(pmu->elpg_refcnt > 1)) {
-                gk20a_warn(dev_from_gk20a(g),
+                nvgpu_warn(g,
                         "%s(): possible elpg refcnt mismatch. elpg refcnt=%d",
                         __func__, pmu->elpg_refcnt);
                 WARN_ON(1);
         }
 
@@ -5102,9 +5074,9 @@ int gk20a_pmu_disable_elpg(struct gk20a *g)
 
         pmu->elpg_refcnt--;
         if (pmu->elpg_refcnt > 0) {
-                gk20a_warn(dev_from_gk20a(g),
+                nvgpu_warn(g,
                         "%s(): possible elpg refcnt mismatch. elpg refcnt=%d",
                         __func__, pmu->elpg_refcnt);
                 WARN_ON(1);
                 ret = 0;
                 goto exit_unlock;
@@ -5123,8 +5095,7 @@ int gk20a_pmu_disable_elpg(struct gk20a *g)
                               &pmu->elpg_stat, PMU_ELPG_STAT_ON);
 
         if (pmu->elpg_stat != PMU_ELPG_STAT_ON) {
-                gk20a_err(dev_from_gk20a(g),
-                        "ELPG_ALLOW_ACK failed, elpg_stat=%d",
+                nvgpu_err(g, "ELPG_ALLOW_ACK failed, elpg_stat=%d",
                         pmu->elpg_stat);
                 pmu_dump_elpg_stats(pmu);
                 pmu_dump_falcon_stats(pmu);
@@ -5175,8 +5146,7 @@ int gk20a_pmu_disable_elpg(struct gk20a *g)
                                 gk20a_get_gr_idle_timeout(g),
                                 ptr, PMU_ELPG_STAT_OFF);
                 if (*ptr != PMU_ELPG_STAT_OFF) {
-                        gk20a_err(dev_from_gk20a(g),
-                                "ELPG_DISALLOW_ACK failed");
+                        nvgpu_err(g, "ELPG_DISALLOW_ACK failed");
                         pmu_dump_elpg_stats(pmu);
                         pmu_dump_falcon_stats(pmu);
                         ret = -EBUSY;