Diffstat (limited to 'drivers/gpu/nvgpu/gk20a')
-rw-r--r--  drivers/gpu/nvgpu/gk20a/gk20a.h     |  2
-rw-r--r--  drivers/gpu/nvgpu/gk20a/gr_gk20a.c  | 21
-rw-r--r--  drivers/gpu/nvgpu/gk20a/gr_gk20a.h  |  8
3 files changed, 12 insertions(+), 19 deletions(-)
diff --git a/drivers/gpu/nvgpu/gk20a/gk20a.h b/drivers/gpu/nvgpu/gk20a/gk20a.h
index a34f06b2..65c3b56f 100644
--- a/drivers/gpu/nvgpu/gk20a/gk20a.h
+++ b/drivers/gpu/nvgpu/gk20a/gk20a.h
@@ -225,7 +225,7 @@ struct gpu_ops {
 		void (*set_gpc_tpc_mask)(struct gk20a *g, u32 gpc_index);
 		void (*free_channel_ctx)(struct channel_gk20a *c, bool is_tsg);
 		int (*alloc_obj_ctx)(struct channel_gk20a *c,
-				struct nvgpu_alloc_obj_ctx_args *args);
+				u32 class_num, u32 flags);
 		int (*bind_ctxsw_zcull)(struct gk20a *g, struct gr_gk20a *gr,
 				struct channel_gk20a *c, u64 zcull_va,
 				u32 mode);
diff --git a/drivers/gpu/nvgpu/gk20a/gr_gk20a.c b/drivers/gpu/nvgpu/gk20a/gr_gk20a.c
index f78d862c..ef7136fe 100644
--- a/drivers/gpu/nvgpu/gk20a/gr_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/gr_gk20a.c
@@ -2890,8 +2890,7 @@ void gk20a_free_channel_ctx(struct channel_gk20a *c, bool is_tsg)
 	c->first_init = false;
 }
 
-int gk20a_alloc_obj_ctx(struct channel_gk20a *c,
-			struct nvgpu_alloc_obj_ctx_args *args)
+int gk20a_alloc_obj_ctx(struct channel_gk20a *c, u32 class_num, u32 flags)
 {
 	struct gk20a *g = c->g;
 	struct fifo_gk20a *f = &g->fifo;
@@ -2909,13 +2908,13 @@ int gk20a_alloc_obj_ctx(struct channel_gk20a *c,
 		return -EINVAL;
 	}
 
-	if (!g->ops.gr.is_valid_class(g, args->class_num)) {
+	if (!g->ops.gr.is_valid_class(g, class_num)) {
 		nvgpu_err(g,
-			"invalid obj class 0x%x", args->class_num);
+			"invalid obj class 0x%x", class_num);
 		err = -EINVAL;
 		goto out;
 	}
-	c->obj_class = args->class_num;
+	c->obj_class = class_num;
 
 	if (gk20a_is_channel_marked_as_tsg(c))
 		tsg = &f->tsg[c->tsgid];
@@ -2924,8 +2923,8 @@ int gk20a_alloc_obj_ctx(struct channel_gk20a *c,
 	if (!tsg) {
 		if (!ch_ctx->gr_ctx) {
 			err = gr_gk20a_alloc_channel_gr_ctx(g, c,
-				args->class_num,
-				args->flags);
+				class_num,
+				flags);
 			if (err) {
 				nvgpu_err(g,
 					"fail to allocate gr ctx buffer");
@@ -2945,8 +2944,8 @@ int gk20a_alloc_obj_ctx(struct channel_gk20a *c,
 			tsg->vm = c->vm;
 			nvgpu_vm_get(tsg->vm);
 			err = gr_gk20a_alloc_tsg_gr_ctx(g, tsg,
-				args->class_num,
-				args->flags);
+				class_num,
+				flags);
 			if (err) {
 				nvgpu_err(g,
 					"fail to allocate TSG gr ctx buffer");
@@ -2993,7 +2992,7 @@ int gk20a_alloc_obj_ctx(struct channel_gk20a *c,
 	}
 
 	/* tweak any perf parameters per-context here */
-	if (args->class_num == KEPLER_COMPUTE_A) {
+	if (class_num == KEPLER_COMPUTE_A) {
 		u32 tex_lock_disable_mask;
 		u32 texlock;
 		u32 lockboost_mask;
@@ -3047,7 +3046,7 @@ int gk20a_alloc_obj_ctx(struct channel_gk20a *c,
 			"failed to set texlock for compute class");
 	}
 
-	args->flags |= NVGPU_ALLOC_OBJ_FLAGS_LOCKBOOST_ZERO;
+	flags |= NVGPU_ALLOC_OBJ_FLAGS_LOCKBOOST_ZERO;
 
 	if (g->support_pmu && g->can_elpg)
 		nvgpu_pmu_enable_elpg(g);
diff --git a/drivers/gpu/nvgpu/gk20a/gr_gk20a.h b/drivers/gpu/nvgpu/gk20a/gr_gk20a.h
index db1a9514..8a044728 100644
--- a/drivers/gpu/nvgpu/gk20a/gr_gk20a.h
+++ b/drivers/gpu/nvgpu/gk20a/gr_gk20a.h
@@ -522,13 +522,7 @@ int gk20a_init_gr_channel(struct channel_gk20a *ch_gk20a);
 
 int gr_gk20a_init_ctx_vars(struct gk20a *g, struct gr_gk20a *gr);
 
-struct nvgpu_alloc_obj_ctx_args;
-struct nvgpu_free_obj_ctx_args;
-
-int gk20a_alloc_obj_ctx(struct channel_gk20a *c,
-		struct nvgpu_alloc_obj_ctx_args *args);
-int gk20a_free_obj_ctx(struct channel_gk20a *c,
-		struct nvgpu_free_obj_ctx_args *args);
+int gk20a_alloc_obj_ctx(struct channel_gk20a *c, u32 class_num, u32 flags);
 void gk20a_free_channel_ctx(struct channel_gk20a *c, bool is_tsg);
 
 int gk20a_gr_isr(struct gk20a *g);
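
With struct nvgpu_alloc_obj_ctx_args removed from the common gr code, callers now pass the class number and flags as plain scalars. A minimal sketch of how an ioctl-layer caller might unpack the userspace args struct and invoke the HAL op under the new signature follows; the wrapper name is hypothetical and the surrounding locking/validation done by the real ioctl handler is omitted, so this only illustrates the calling convention introduced by this diff.

	/* Hypothetical ioctl-layer wrapper: the args struct stays in the
	 * OS-specific layer, and the common code receives only the two
	 * scalars it actually needs. */
	static int nvgpu_channel_alloc_obj_ctx_sketch(struct channel_gk20a *ch,
			struct nvgpu_alloc_obj_ctx_args *args)
	{
		struct gk20a *g = ch->g;

		/* New signature from this change: (channel, class_num, flags). */
		return g->ops.gr.alloc_obj_ctx(ch, args->class_num, args->flags);
	}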