Diffstat (limited to 'drivers/gpu/nvgpu/gk20a/gr_gk20a.c')
-rw-r--r--  drivers/gpu/nvgpu/gk20a/gr_gk20a.c  33
1 file changed, 17 insertions(+), 16 deletions(-)
diff --git a/drivers/gpu/nvgpu/gk20a/gr_gk20a.c b/drivers/gpu/nvgpu/gk20a/gr_gk20a.c
index 90838c64..11bca5bb 100644
--- a/drivers/gpu/nvgpu/gk20a/gr_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/gr_gk20a.c
@@ -51,6 +51,7 @@
 #include "gr_pri_gk20a.h"
 #include "regops_gk20a.h"
 #include "dbg_gpu_gk20a.h"
+#include "semaphore_gk20a.h"
 
 #define BLK_SIZE (256)
 
@@ -2174,8 +2175,8 @@ int gr_gk20a_load_ctxsw_ucode(struct gk20a *g)
	 * In case bootloader is not supported, revert to the old way of
	 * loading gr ucode, without the faster bootstrap routine.
	 */
-	if (g->gpu_characteristics.arch != NVHOST_GPU_ARCH_GK100 &&
-		g->gpu_characteristics.arch != NVHOST_GPU_ARCH_GM200) {
+	if (g->gpu_characteristics.arch != NVGPU_GPU_ARCH_GK100 &&
+		g->gpu_characteristics.arch != NVGPU_GPU_ARCH_GM200) {
		gr_gk20a_load_falcon_dmem(g);
		gr_gk20a_load_falcon_imem(g);
		gr_gk20a_start_falcon_ucode(g);
@@ -2437,7 +2438,7 @@ static int gr_gk20a_map_global_ctx_buffers(struct gk20a *g,
	}
 
	gpu_va = gk20a_gmmu_map(ch_vm, &sgt, size,
-			NVHOST_MAP_BUFFER_FLAGS_CACHEABLE_TRUE,
+			NVGPU_MAP_BUFFER_FLAGS_CACHEABLE_TRUE,
			gk20a_mem_flag_none);
	if (!gpu_va)
		goto clean_up;
@@ -2454,7 +2455,7 @@ static int gr_gk20a_map_global_ctx_buffers(struct gk20a *g,
	}
 
	gpu_va = gk20a_gmmu_map(ch_vm, &sgt, size,
-			NVHOST_MAP_BUFFER_FLAGS_CACHEABLE_TRUE,
+			NVGPU_MAP_BUFFER_FLAGS_CACHEABLE_TRUE,
			gk20a_mem_flag_none);
	if (!gpu_va)
		goto clean_up;
@@ -2471,7 +2472,7 @@ static int gr_gk20a_map_global_ctx_buffers(struct gk20a *g,
	}
 
	gpu_va = gk20a_gmmu_map(ch_vm, &sgt, size,
-			NVHOST_MAP_BUFFER_FLAGS_CACHEABLE_TRUE,
+			NVGPU_MAP_BUFFER_FLAGS_CACHEABLE_TRUE,
			gk20a_mem_flag_none);
	if (!gpu_va)
		goto clean_up;
@@ -2574,7 +2575,7 @@ static int __gr_gk20a_alloc_gr_ctx(struct gk20a *g,
		goto err_free;
 
	gr_ctx->gpu_va = gk20a_gmmu_map(vm, &sgt, gr_ctx->size,
-			NVHOST_MAP_BUFFER_FLAGS_CACHEABLE_TRUE,
+			NVGPU_MAP_BUFFER_FLAGS_CACHEABLE_TRUE,
			gk20a_mem_flag_none);
	if (!gr_ctx->gpu_va)
		goto err_free_sgt;
@@ -2780,7 +2781,7 @@ static bool gr_gk20a_is_valid_class(struct gk20a *g, u32 class_num)
 }
 
 int gk20a_alloc_obj_ctx(struct channel_gk20a *c,
-			struct nvhost_alloc_obj_ctx_args *args)
+			struct nvgpu_alloc_obj_ctx_args *args)
 {
	struct gk20a *g = c->g;
	struct fifo_gk20a *f = &g->fifo;
@@ -2943,7 +2944,7 @@ out:
 }
 
 int gk20a_free_obj_ctx(struct channel_gk20a *c,
-			struct nvhost_free_obj_ctx_args *args)
+			struct nvgpu_free_obj_ctx_args *args)
 {
	unsigned long timeout = gk20a_get_gr_idle_timeout(c->g);
 
@@ -4956,7 +4957,7 @@ static int gk20a_gr_handle_semaphore_timeout_pending(struct gk20a *g,
	struct channel_gk20a *ch = &f->channel[isr_data->chid];
	gk20a_dbg_fn("");
	gk20a_set_error_notifier(ch,
-			NVHOST_CHANNEL_GR_SEMAPHORE_TIMEOUT);
+			NVGPU_CHANNEL_GR_SEMAPHORE_TIMEOUT);
	gk20a_err(dev_from_gk20a(g),
		"gr semaphore timeout\n");
	return -EINVAL;
@@ -4969,7 +4970,7 @@ static int gk20a_gr_intr_illegal_notify_pending(struct gk20a *g,
	struct channel_gk20a *ch = &f->channel[isr_data->chid];
	gk20a_dbg_fn("");
	gk20a_set_error_notifier(ch,
-			NVHOST_CHANNEL_GR_ILLEGAL_NOTIFY);
+			NVGPU_CHANNEL_GR_ILLEGAL_NOTIFY);
	/* This is an unrecoverable error, reset is needed */
	gk20a_err(dev_from_gk20a(g),
		"gr semaphore timeout\n");
@@ -4997,7 +4998,7 @@ static int gk20a_gr_handle_illegal_class(struct gk20a *g,
	struct channel_gk20a *ch = &f->channel[isr_data->chid];
	gk20a_dbg_fn("");
	gk20a_set_error_notifier(ch,
-			NVHOST_CHANNEL_GR_ERROR_SW_NOTIFY);
+			NVGPU_CHANNEL_GR_ERROR_SW_NOTIFY);
	gk20a_err(dev_from_gk20a(g),
		"invalid class 0x%08x, offset 0x%08x",
		isr_data->class_num, isr_data->offset);
@@ -5037,7 +5038,7 @@ static int gk20a_gr_handle_class_error(struct gk20a *g,
	gk20a_dbg_fn("");
 
	gk20a_set_error_notifier(ch,
-			NVHOST_CHANNEL_GR_ERROR_SW_NOTIFY);
+			NVGPU_CHANNEL_GR_ERROR_SW_NOTIFY);
	gk20a_err(dev_from_gk20a(g),
		"class error 0x%08x, offset 0x%08x, unhandled intr 0x%08x for channel %u\n",
		isr_data->class_num, isr_data->offset,
@@ -5054,7 +5055,7 @@ static int gk20a_gr_handle_firmware_method(struct gk20a *g,
	gk20a_dbg_fn("");
 
	gk20a_set_error_notifier(ch,
-			NVHOST_CHANNEL_GR_ERROR_SW_NOTIFY);
+			NVGPU_CHANNEL_GR_ERROR_SW_NOTIFY);
	gk20a_err(dev_from_gk20a(g),
		"firmware method 0x%08x, offset 0x%08x for channel %u\n",
		isr_data->class_num, isr_data->offset,
@@ -5674,7 +5675,7 @@ int gk20a_gr_isr(struct gk20a *g)
 
		if (need_reset)
			gk20a_set_error_notifier(ch,
-				NVHOST_CHANNEL_GR_ERROR_SW_NOTIFY);
+				NVGPU_CHANNEL_GR_ERROR_SW_NOTIFY);
	}
 
	gk20a_writel(g, gr_intr_r(), gr_intr_exception_reset_f());
@@ -6774,7 +6775,7 @@ static int gr_gk20a_find_priv_offset_in_buffer(struct gk20a *g,
 
 
 int gr_gk20a_exec_ctx_ops(struct channel_gk20a *ch,
-		struct nvhost_dbg_gpu_reg_op *ctx_ops, u32 num_ops,
+		struct nvgpu_dbg_gpu_reg_op *ctx_ops, u32 num_ops,
		u32 num_ctx_wr_ops, u32 num_ctx_rd_ops)
 {
	struct gk20a *g = ch->g;
@@ -6921,7 +6922,7 @@ int gr_gk20a_exec_ctx_ops(struct channel_gk20a *ch,
				"ctx op invalid offset: offset=0x%x",
				ctx_ops[i].offset);
			ctx_ops[i].status =
-				NVHOST_DBG_GPU_REG_OP_STATUS_INVALID_OFFSET;
+				NVGPU_DBG_GPU_REG_OP_STATUS_INVALID_OFFSET;
			continue;
		}
 