author	Stephen Warren <swarren@nvidia.com>	2017-05-22 14:27:40 -0400
committer	mobile promotions <svcmobile_promotions@nvidia.com>	2017-05-26 06:34:30 -0400
commit	2e338c77eac4edffb94c8c9480dbd72712c7696f (patch)
tree	9d5da6355bc653ef2ba9014ea696ad0391c523a1 /drivers/gpu/nvgpu/gk20a/gr_gk20a.c
parent	726900b8433294fd89a6d730d2fec9de8e33afda (diff)
gpu: nvgpu: remove duplicate \n from log messages
nvgpu_log/info/warn/err() internally add a \n to the end of the message.
Hence, callers should not include a \n at the end of the message. Doing
so results in duplicate \n being printed, which ends up creating empty
log messages. Remove the duplicate \n from all err/warn messages.

Bug 1928311

Change-Id: I99362c5327f36146f28ba63d4e68181589735c39
Signed-off-by: Stephen Warren <swarren@nvidia.com>
Reviewed-on: http://git-master/r/1487232
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
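[Editor's note: a minimal userspace sketch of the failure mode the commit message describes. The wrapper below is hypothetical; the real nvgpu_err() takes a struct gk20a * and prints through the kernel log. It only assumes what the message states: the wrapper appends its own newline, so a caller-supplied \n produces an empty log line.]

	#include <stdarg.h>
	#include <stdio.h>

	/* Hypothetical stand-in for nvgpu_err(): like the real wrapper,
	 * it unconditionally appends its own '\n' to every message. */
	static void nvgpu_err_sketch(const char *fmt, ...)
	{
		va_list args;

		va_start(args, fmt);
		vfprintf(stderr, fmt, args);
		va_end(args);
		fputc('\n', stderr);	/* newline added by the wrapper itself */
	}

	int main(void)
	{
		/* Old style: the trailing "\n" duplicates the wrapper's
		 * newline, so an empty log message follows this one. */
		nvgpu_err_sketch("failed to disable channel/TSG\n");

		/* New style after this patch: no trailing "\n". */
		nvgpu_err_sketch("failed to disable channel/TSG");
		return 0;
	}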
Diffstat (limited to 'drivers/gpu/nvgpu/gk20a/gr_gk20a.c')
-rw-r--r--	drivers/gpu/nvgpu/gk20a/gr_gk20a.c	| 98
1 file changed, 49 insertions(+), 49 deletions(-)
diff --git a/drivers/gpu/nvgpu/gk20a/gr_gk20a.c b/drivers/gpu/nvgpu/gk20a/gr_gk20a.c
index 2b1013a0..f9c76ae5 100644
--- a/drivers/gpu/nvgpu/gk20a/gr_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/gr_gk20a.c
@@ -784,13 +784,13 @@ static int gr_gk20a_ctx_zcull_setup(struct gk20a *g, struct channel_gk20a *c)
 
 	ret = gk20a_disable_channel_tsg(g, c);
 	if (ret) {
-		nvgpu_err(g, "failed to disable channel/TSG\n");
+		nvgpu_err(g, "failed to disable channel/TSG");
 		goto clean_up;
 	}
 	ret = gk20a_fifo_preempt(g, c);
 	if (ret) {
 		gk20a_enable_channel_tsg(g, c);
-		nvgpu_err(g, "failed to preempt channel/TSG\n");
+		nvgpu_err(g, "failed to preempt channel/TSG");
 		goto clean_up;
 	}
 
@@ -1857,13 +1857,13 @@ int gr_gk20a_update_smpc_ctxsw_mode(struct gk20a *g,
 
 	ret = gk20a_disable_channel_tsg(g, c);
 	if (ret) {
-		nvgpu_err(g, "failed to disable channel/TSG\n");
+		nvgpu_err(g, "failed to disable channel/TSG");
 		goto out;
 	}
 	ret = gk20a_fifo_preempt(g, c);
 	if (ret) {
 		gk20a_enable_channel_tsg(g, c);
-		nvgpu_err(g, "failed to preempt channel/TSG\n");
+		nvgpu_err(g, "failed to preempt channel/TSG");
 		goto out;
 	}
 
@@ -1925,14 +1925,14 @@ int gr_gk20a_update_hwpm_ctxsw_mode(struct gk20a *g,
 
 	ret = gk20a_disable_channel_tsg(g, c);
 	if (ret) {
-		nvgpu_err(g, "failed to disable channel/TSG\n");
+		nvgpu_err(g, "failed to disable channel/TSG");
 		return ret;
 	}
 
 	ret = gk20a_fifo_preempt(g, c);
 	if (ret) {
 		gk20a_enable_channel_tsg(g, c);
-		nvgpu_err(g, "failed to preempt channel/TSG\n");
+		nvgpu_err(g, "failed to preempt channel/TSG");
 		return ret;
 	}
 
@@ -2213,7 +2213,7 @@ static int gr_gk20a_init_ctxsw_ucode_vaspace(struct gk20a *g)
 					false,
 					ucode_info->surface_desc.aperture);
 	if (!ucode_info->surface_desc.gpu_va) {
-		nvgpu_err(g, "failed to update gmmu ptes\n");
+		nvgpu_err(g, "failed to update gmmu ptes");
 		return -ENOMEM;
 	}
 
@@ -2977,7 +2977,7 @@ static int gr_gk20a_alloc_tsg_gr_ctx(struct gk20a *g,
 	int err;
 
 	if (!tsg->vm) {
-		nvgpu_err(tsg->g, "No address space bound\n");
+		nvgpu_err(tsg->g, "No address space bound");
 		return -ENOMEM;
 	}
 
@@ -3017,7 +3017,7 @@ void gr_gk20a_free_gr_ctx(struct gk20a *g,
 void gr_gk20a_free_tsg_gr_ctx(struct tsg_gk20a *tsg)
 {
 	if (!tsg->vm) {
-		nvgpu_err(tsg->g, "No address space bound\n");
+		nvgpu_err(tsg->g, "No address space bound");
 		return;
 	}
 	tsg->g->ops.gr.free_gr_ctx(tsg->g, tsg->vm, tsg->tsg_gr_ctx);
@@ -3942,7 +3942,7 @@ static void gr_gk20a_detect_sm_arch(struct gk20a *g)
 	if (raw_version == gr_gpc0_tpc0_sm_arch_spa_version_smkepler_lp_v())
 		version = 0x320; /* SM 3.2 */
 	else
-		nvgpu_err(g, "Unknown SM version 0x%x\n",
+		nvgpu_err(g, "Unknown SM version 0x%x",
 			raw_version);
 
 	/* on Kepler, SM version == SPA version */
@@ -4056,7 +4056,7 @@ clean_up:
 	ret = gk20a_fifo_enable_engine_activity(g, gr_info);
 	if (ret) {
 		nvgpu_err(g,
-			"failed to enable gr engine activity\n");
+			"failed to enable gr engine activity");
 	}
 }
 
@@ -4181,7 +4181,7 @@ int gr_gk20a_query_zbc(struct gk20a *g, struct gr_gk20a *gr,
 	case GK20A_ZBC_TYPE_COLOR:
 		if (index >= GK20A_ZBC_TABLE_SIZE) {
 			nvgpu_err(g,
-				"invalid zbc color table index\n");
+				"invalid zbc color table index");
 			return -EINVAL;
 		}
 		for (i = 0; i < GK20A_ZBC_COLOR_VALUE_SIZE; i++) {
@@ -4196,7 +4196,7 @@ int gr_gk20a_query_zbc(struct gk20a *g, struct gr_gk20a *gr,
 	case GK20A_ZBC_TYPE_DEPTH:
 		if (index >= GK20A_ZBC_TABLE_SIZE) {
 			nvgpu_err(g,
-				"invalid zbc depth table index\n");
+				"invalid zbc depth table index");
 			return -EINVAL;
 		}
 		query_params->depth = gr->zbc_dep_tbl[index].depth;
@@ -4209,13 +4209,13 @@ int gr_gk20a_query_zbc(struct gk20a *g, struct gr_gk20a *gr,
 				query_params);
 		} else {
 			nvgpu_err(g,
-				"invalid zbc table type\n");
+				"invalid zbc table type");
 			return -EINVAL;
 		}
 		break;
 	default:
 		nvgpu_err(g,
-			"invalid zbc table type\n");
+			"invalid zbc table type");
 		return -EINVAL;
 	}
 
@@ -4305,7 +4305,7 @@ int gr_gk20a_load_zbc_default_table(struct gk20a *g, struct gr_gk20a *gr)
 		gr->max_default_color_index = 3;
 	else {
 		nvgpu_err(g,
-			"fail to load default zbc color table\n");
+			"fail to load default zbc color table");
 		return err;
 	}
 
@@ -4324,7 +4324,7 @@ int gr_gk20a_load_zbc_default_table(struct gk20a *g, struct gr_gk20a *gr)
 		gr->max_default_depth_index = 2;
 	else {
 		nvgpu_err(g,
-			"fail to load default zbc depth table\n");
+			"fail to load default zbc depth table");
 		return err;
 	}
 
@@ -5212,7 +5212,7 @@ static int gk20a_init_gr_bind_fecs_elpg(struct gk20a *g)
 	if (!pmu->pg_buf.cpu_va) {
 		err = nvgpu_dma_alloc_map_sys(vm, size, &pmu->pg_buf);
 		if (err) {
-			nvgpu_err(g, "failed to allocate memory\n");
+			nvgpu_err(g, "failed to allocate memory");
 			return -ENOMEM;
 		}
 	}
@@ -5589,7 +5589,7 @@ static int gk20a_gr_handle_semaphore_timeout_pending(struct gk20a *g,
 	gk20a_gr_set_error_notifier(g, isr_data,
 		NVGPU_CHANNEL_GR_SEMAPHORE_TIMEOUT);
 	nvgpu_err(g,
-		"gr semaphore timeout\n");
+		"gr semaphore timeout");
 	return -EINVAL;
 }
 
@@ -5601,7 +5601,7 @@ static int gk20a_gr_intr_illegal_notify_pending(struct gk20a *g,
 		NVGPU_CHANNEL_GR_ILLEGAL_NOTIFY);
 	/* This is an unrecoverable error, reset is needed */
 	nvgpu_err(g,
-		"gr semaphore timeout\n");
+		"gr semaphore timeout");
 	return -EINVAL;
 }
 
@@ -5615,7 +5615,7 @@ static int gk20a_gr_handle_illegal_method(struct gk20a *g,
 		gk20a_gr_set_error_notifier(g, isr_data,
 			NVGPU_CHANNEL_GR_ILLEGAL_NOTIFY);
 		nvgpu_err(g, "invalid method class 0x%08x"
-			", offset 0x%08x address 0x%08x\n",
+			", offset 0x%08x address 0x%08x",
 			isr_data->class_num, isr_data->offset, isr_data->addr);
 	}
 	return ret;
@@ -5675,7 +5675,7 @@ static int gk20a_gr_handle_class_error(struct gk20a *g,
 			NVGPU_CHANNEL_GR_ERROR_SW_NOTIFY);
 	nvgpu_err(g,
 		"class error 0x%08x, offset 0x%08x,"
-		" unhandled intr 0x%08x for channel %u\n",
+		" unhandled intr 0x%08x for channel %u",
 		isr_data->class_num, isr_data->offset,
 		gr_class_error, isr_data->chid);
 
@@ -5690,7 +5690,7 @@ static int gk20a_gr_handle_firmware_method(struct gk20a *g,
 	gk20a_gr_set_error_notifier(g, isr_data,
 		NVGPU_CHANNEL_GR_ERROR_SW_NOTIFY);
 	nvgpu_err(g,
-		"firmware method 0x%08x, offset 0x%08x for channel %u\n",
+		"firmware method 0x%08x, offset 0x%08x for channel %u",
 		isr_data->class_num, isr_data->offset,
 		isr_data->chid);
 	return -EINVAL;
@@ -5768,7 +5768,7 @@ static int gk20a_gr_handle_notify_pending(struct gk20a *g,
 		if (offset + sizeof(struct share_buffer_head) > buffer_size ||
 		    offset + sizeof(struct share_buffer_head) < offset) {
 			nvgpu_err(g,
-				"cyclestats buffer overrun at offset 0x%x\n",
+				"cyclestats buffer overrun at offset 0x%x",
 				offset);
 			break;
 		}
@@ -5786,7 +5786,7 @@ static int gk20a_gr_handle_notify_pending(struct gk20a *g,
 		    offset + sh_hdr->size > buffer_size ||
 		    offset + sh_hdr->size < offset) {
 			nvgpu_err(g,
-				"bad cyclestate buffer header size at offset 0x%x\n",
+				"bad cyclestate buffer header size at offset 0x%x",
 				offset);
 			sh_hdr->failed = true;
 			break;
@@ -5810,7 +5810,7 @@ static int gk20a_gr_handle_notify_pending(struct gk20a *g,
 
 			if (!valid) {
 				nvgpu_err(g,
-					"invalid cycletstats op offset: 0x%x\n",
+					"invalid cycletstats op offset: 0x%x",
 					op_elem->offset_bar0);
 
 				sh_hdr->failed = exit = true;
@@ -6065,7 +6065,7 @@ static int gk20a_gr_update_sm_error_state(struct gk20a *g,
 
 	err = gr_gk20a_disable_ctxsw(g);
 	if (err) {
-		nvgpu_err(g, "unable to stop gr ctxsw\n");
+		nvgpu_err(g, "unable to stop gr ctxsw");
 		goto fail;
 	}
 
@@ -6125,7 +6125,7 @@ static int gk20a_gr_clear_sm_error_state(struct gk20a *g,
 
 	err = gr_gk20a_disable_ctxsw(g);
 	if (err) {
-		nvgpu_err(g, "unable to stop gr ctxsw\n");
+		nvgpu_err(g, "unable to stop gr ctxsw");
 		goto fail;
 	}
 
@@ -6179,7 +6179,7 @@ int gr_gk20a_handle_sm_exception(struct gk20a *g, u32 gpc, u32 tpc,
 	warp_esr = g->ops.gr.mask_hww_warp_esr(warp_esr);
 
 	if (!sm_debugger_attached) {
-		nvgpu_err(g, "sm hww global %08x warp %08x\n",
+		nvgpu_err(g, "sm hww global %08x warp %08x",
 			global_esr, warp_esr);
 		return -EFAULT;
 	}
@@ -6199,7 +6199,7 @@ int gr_gk20a_handle_sm_exception(struct gk20a *g, u32 gpc, u32 tpc,
 				&early_exit,
 				&ignore_debugger);
 		if (ret) {
-			nvgpu_err(g, "could not pre-process sm error!\n");
+			nvgpu_err(g, "could not pre-process sm error!");
 			return ret;
 		}
 	}
@@ -6241,7 +6241,7 @@ int gr_gk20a_handle_sm_exception(struct gk20a *g, u32 gpc, u32 tpc,
 	if (do_warp_sync) {
 		ret = gk20a_gr_lock_down_sm(g, gpc, tpc, global_mask, true);
 		if (ret) {
-			nvgpu_err(g, "sm did not lock down!\n");
+			nvgpu_err(g, "sm did not lock down!");
 			return ret;
 		}
 	}
@@ -7357,7 +7357,7 @@ static int gr_gk20a_find_priv_offset_in_ext_buffer(struct gk20a *g,
 	num_gpcs = *(u32 *)(context + ctxsw_prog_main_image_num_gpcs_o());
 	if (gpc_num >= num_gpcs) {
 		nvgpu_err(g,
-			"GPC 0x%08x is greater than total count 0x%08x!\n",
+			"GPC 0x%08x is greater than total count 0x%08x!",
 			gpc_num, num_gpcs);
 		return -EINVAL;
 	}
@@ -7378,7 +7378,7 @@ static int gr_gk20a_find_priv_offset_in_ext_buffer(struct gk20a *g,
 	context += ctxsw_prog_ucode_header_size_in_bytes();
 	if (!check_local_header_magic(context)) {
 		nvgpu_err(g,
-			"Invalid local header: magic value\n");
+			"Invalid local header: magic value");
 		return -EINVAL;
 	}
 
@@ -7409,7 +7409,7 @@ static int gr_gk20a_find_priv_offset_in_ext_buffer(struct gk20a *g,
 
 		if (chk_addr != addr) {
 			nvgpu_err(g,
-				"Oops addr miss-match! : 0x%08x != 0x%08x\n",
+				"Oops addr miss-match! : 0x%08x != 0x%08x",
 				addr, chk_addr);
 			return -EINVAL;
 		}
@@ -7440,7 +7440,7 @@ static int gr_gk20a_find_priv_offset_in_ext_buffer(struct gk20a *g,
 
 		if (chk_addr != addr) {
 			nvgpu_err(g,
-				"Oops addr miss-match! : 0x%08x != 0x%08x\n",
+				"Oops addr miss-match! : 0x%08x != 0x%08x",
 				addr, chk_addr);
 			return -EINVAL;
 
@@ -7509,7 +7509,7 @@ static int gr_gk20a_find_priv_offset_in_ext_buffer(struct gk20a *g,
 	 * extended buffer? */
 	if (offset_to_segment > offset_to_segment_end) {
 		nvgpu_err(g,
-			"Overflow ctxsw buffer! 0x%08x > 0x%08x\n",
+			"Overflow ctxsw buffer! 0x%08x > 0x%08x",
 			offset_to_segment, offset_to_segment_end);
 		return -EINVAL;
 	}
@@ -7710,7 +7710,7 @@ static int gr_gk20a_find_priv_offset_in_buffer(struct gk20a *g,
 	context += ctxsw_prog_ucode_header_size_in_bytes();
 	if (!check_local_header_magic(context)) {
 		nvgpu_err(g,
-			"Invalid FECS local header: magic value\n");
+			"Invalid FECS local header: magic value");
 		return -EINVAL;
 	}
 	data32 = *(u32 *)(context + ctxsw_prog_local_priv_register_ctl_o());
@@ -7745,7 +7745,7 @@ static int gr_gk20a_find_priv_offset_in_buffer(struct gk20a *g,
 
 	if ((gpc_num + 1) > num_gpcs) {
 		nvgpu_err(g,
-			"GPC %d not in this context buffer.\n",
+			"GPC %d not in this context buffer.",
 			gpc_num);
 		return -EINVAL;
 	}
@@ -7755,7 +7755,7 @@ static int gr_gk20a_find_priv_offset_in_buffer(struct gk20a *g,
 		context += ctxsw_prog_ucode_header_size_in_bytes();
 		if (!check_local_header_magic(context)) {
 			nvgpu_err(g,
-				"Invalid GPCCS local header: magic value\n");
+				"Invalid GPCCS local header: magic value");
 			return -EINVAL;
 
 		}
@@ -7772,7 +7772,7 @@ static int gr_gk20a_find_priv_offset_in_buffer(struct gk20a *g,
 
 		if ((i == gpc_num) && ((tpc_num + 1) > num_tpcs)) {
 			nvgpu_err(g,
-				"GPC %d TPC %d not in this context buffer.\n",
+				"GPC %d TPC %d not in this context buffer.",
 				gpc_num, tpc_num);
 			return -EINVAL;
 		}
@@ -8547,7 +8547,7 @@ int gr_gk20a_exec_ctx_ops(struct channel_gk20a *ch,
 
 	tmp_err = gr_gk20a_enable_ctxsw(g);
 	if (tmp_err) {
-		nvgpu_err(g, "unable to restart ctxsw!\n");
+		nvgpu_err(g, "unable to restart ctxsw!");
 		err = tmp_err;
 	}
 
@@ -8718,7 +8718,7 @@ int gk20a_gr_wait_for_sm_lock_down(struct gk20a *g, u32 gpc, u32 tpc,
 		nvgpu_err(g,
 			"GPC%d TPC%d: timed out while trying to lock down SM", gpc, tpc);
 		nvgpu_err(g,
-			"STATUS0(0x%x)=0x%x CONTROL0=0x%x VALID_MASK=0x%llx PAUSE_MASK=0x%llx TRAP_MASK=0x%llx\n",
+			"STATUS0(0x%x)=0x%x CONTROL0=0x%x VALID_MASK=0x%llx PAUSE_MASK=0x%llx TRAP_MASK=0x%llx",
 			gr_gpc0_tpc0_sm_dbgr_status0_r() + offset, dbgr_status0, dbgr_control0,
 			warps_valid, warps_paused, warps_trapped);
 
@@ -8739,7 +8739,7 @@ void gk20a_suspend_single_sm(struct gk20a *g,
 	/* if an SM debugger isn't attached, skip suspend */
 	if (!gk20a_gr_sm_debugger_attached(g)) {
 		nvgpu_err(g,
-			"SM debugger not attached, skipping suspend!\n");
+			"SM debugger not attached, skipping suspend!");
 		return;
 	}
 
@@ -8754,7 +8754,7 @@ void gk20a_suspend_single_sm(struct gk20a *g,
 			global_esr_mask, check_errors);
 	if (err) {
 		nvgpu_err(g,
-			"SuspendSm failed\n");
+			"SuspendSm failed");
 		return;
 	}
 }
@@ -8770,7 +8770,7 @@ void gk20a_suspend_all_sms(struct gk20a *g,
 	/* if an SM debugger isn't attached, skip suspend */
 	if (!gk20a_gr_sm_debugger_attached(g)) {
 		nvgpu_err(g,
-			"SM debugger not attached, skipping suspend!\n");
+			"SM debugger not attached, skipping suspend!");
 		return;
 	}
 
@@ -8791,7 +8791,7 @@ void gk20a_suspend_all_sms(struct gk20a *g,
 			global_esr_mask, check_errors);
 		if (err) {
 			nvgpu_err(g,
-				"SuspendAllSms failed\n");
+				"SuspendAllSms failed");
 			return;
 		}
 	}
@@ -9099,7 +9099,7 @@ int gr_gk20a_set_sm_debug_mode(struct gk20a *g,
 
 	err = gr_gk20a_exec_ctx_ops(ch, ops, i, i, 0);
 	if (err)
-		nvgpu_err(g, "Failed to access register\n");
+		nvgpu_err(g, "Failed to access register");
 	nvgpu_kfree(g, ops);
 	return err;
 }
@@ -9237,7 +9237,7 @@ int gr_gk20a_suspend_contexts(struct gk20a *g,
 
 	err = gr_gk20a_enable_ctxsw(g);
 	if (err)
-		nvgpu_err(g, "unable to restart ctxsw!\n");
+		nvgpu_err(g, "unable to restart ctxsw!");
 
 	*ctx_resident_ch_fd = local_ctx_resident_ch_fd;
 
@@ -9275,7 +9275,7 @@ int gr_gk20a_resume_contexts(struct gk20a *g,
 
 	err = gr_gk20a_enable_ctxsw(g);
 	if (err)
-		nvgpu_err(g, "unable to restart ctxsw!\n");
+		nvgpu_err(g, "unable to restart ctxsw!");
 
 	*ctx_resident_ch_fd = local_ctx_resident_ch_fd;
 