summary | refs | log | tree | commit | diff | stats
path: root/drivers/gpu/nvgpu/gk20a/gr_gk20a.h
diff options
context:
space:
mode:
Diffstat (limited to 'drivers/gpu/nvgpu/gk20a/gr_gk20a.h')
-rw-r--r--  drivers/gpu/nvgpu/gk20a/gr_gk20a.h | 40
1 file changed, 27 insertions(+), 13 deletions(-)
diff --git a/drivers/gpu/nvgpu/gk20a/gr_gk20a.h b/drivers/gpu/nvgpu/gk20a/gr_gk20a.h
index 1c22923b..6cc15c94 100644
--- a/drivers/gpu/nvgpu/gk20a/gr_gk20a.h
+++ b/drivers/gpu/nvgpu/gk20a/gr_gk20a.h
@@ -28,7 +28,6 @@
28#include "gr_t19x.h" 28#include "gr_t19x.h"
29#endif 29#endif
30 30
31#include "tsg_gk20a.h"
32#include "gr_ctx_gk20a.h" 31#include "gr_ctx_gk20a.h"
33#include "mm_gk20a.h" 32#include "mm_gk20a.h"
34 33
@@ -48,6 +47,10 @@
48 47
49#define GK20A_TIMEOUT_FPGA 100000 /* 100 sec */ 48#define GK20A_TIMEOUT_FPGA 100000 /* 100 sec */
50 49
50/* Flags to be passed to g->ops.gr.alloc_obj_ctx() */
51#define NVGPU_OBJ_CTX_FLAGS_SUPPORT_GFXP (1 << 1)
52#define NVGPU_OBJ_CTX_FLAGS_SUPPORT_CILP (1 << 2)
53
51/* 54/*
52 * allocate a minimum of 1 page (4KB) worth of patch space, this is 512 entries 55 * allocate a minimum of 1 page (4KB) worth of patch space, this is 512 entries
53 * of address and data pairs 56 * of address and data pairs
@@ -64,6 +67,7 @@
64#define NVGPU_PREEMPTION_MODE_COMPUTE_CTA (1 << 1) 67#define NVGPU_PREEMPTION_MODE_COMPUTE_CTA (1 << 1)
65#define NVGPU_PREEMPTION_MODE_COMPUTE_CILP (1 << 2) 68#define NVGPU_PREEMPTION_MODE_COMPUTE_CILP (1 << 2)
66 69
70struct tsg_gk20a;
67struct channel_gk20a; 71struct channel_gk20a;
68struct nvgpu_warpstate; 72struct nvgpu_warpstate;
69 73
@@ -433,7 +437,12 @@ struct gr_gk20a {
433 437
434void gk20a_fecs_dump_falcon_stats(struct gk20a *g); 438void gk20a_fecs_dump_falcon_stats(struct gk20a *g);
435 439
436struct gr_ctx_desc { 440struct ctx_header_desc {
441 struct nvgpu_mem mem;
442};
443
444/* contexts associated with a TSG */
445struct nvgpu_gr_ctx {
437 struct nvgpu_mem mem; 446 struct nvgpu_mem mem;
438 447
439 u32 graphics_preempt_mode; 448 u32 graphics_preempt_mode;
@@ -452,10 +461,16 @@ struct gr_ctx_desc {
452 u64 virt_ctx; 461 u64 virt_ctx;
453#endif 462#endif
454 bool golden_img_loaded; 463 bool golden_img_loaded;
455};
456 464
457struct ctx_header_desc { 465 struct patch_desc patch_ctx;
458 struct nvgpu_mem mem; 466 struct zcull_ctx_desc zcull_ctx;
467 struct pm_ctx_desc pm_ctx;
468 u64 global_ctx_buffer_va[NR_GLOBAL_CTX_BUF_VA];
469 u64 global_ctx_buffer_size[NR_GLOBAL_CTX_BUF_VA];
470 int global_ctx_buffer_index[NR_GLOBAL_CTX_BUF_VA];
471 bool global_ctx_buffer_mapped;
472
473 u32 tsgid;
459}; 474};
460 475
461struct gk20a_ctxsw_ucode_segment { 476struct gk20a_ctxsw_ucode_segment {
@@ -552,7 +567,6 @@ int gk20a_init_gr_channel(struct channel_gk20a *ch_gk20a);
552int gr_gk20a_init_ctx_vars(struct gk20a *g, struct gr_gk20a *gr); 567int gr_gk20a_init_ctx_vars(struct gk20a *g, struct gr_gk20a *gr);
553 568
554int gk20a_alloc_obj_ctx(struct channel_gk20a *c, u32 class_num, u32 flags); 569int gk20a_alloc_obj_ctx(struct channel_gk20a *c, u32 class_num, u32 flags);
555void gk20a_free_channel_ctx(struct channel_gk20a *c, bool is_tsg);
556 570
557int gk20a_gr_isr(struct gk20a *g); 571int gk20a_gr_isr(struct gk20a *g);
558int gk20a_gr_nonstall_isr(struct gk20a *g); 572int gk20a_gr_nonstall_isr(struct gk20a *g);
@@ -633,17 +647,17 @@ int gr_gk20a_update_hwpm_ctxsw_mode(struct gk20a *g,
633 struct channel_gk20a *c, 647 struct channel_gk20a *c,
634 bool enable_hwpm_ctxsw); 648 bool enable_hwpm_ctxsw);
635 649
636struct channel_ctx_gk20a; 650struct nvgpu_gr_ctx;
637void gr_gk20a_ctx_patch_write(struct gk20a *g, struct channel_ctx_gk20a *ch_ctx, 651void gr_gk20a_ctx_patch_write(struct gk20a *g, struct nvgpu_gr_ctx *ch_ctx,
638 u32 addr, u32 data, bool patch); 652 u32 addr, u32 data, bool patch);
639int gr_gk20a_ctx_patch_write_begin(struct gk20a *g, 653int gr_gk20a_ctx_patch_write_begin(struct gk20a *g,
640 struct channel_ctx_gk20a *ch_ctx, 654 struct nvgpu_gr_ctx *ch_ctx,
641 bool update_patch_count); 655 bool update_patch_count);
642void gr_gk20a_ctx_patch_write_end(struct gk20a *g, 656void gr_gk20a_ctx_patch_write_end(struct gk20a *g,
643 struct channel_ctx_gk20a *ch_ctx, 657 struct nvgpu_gr_ctx *ch_ctx,
644 bool update_patch_count); 658 bool update_patch_count);
645void gr_gk20a_commit_global_pagepool(struct gk20a *g, 659void gr_gk20a_commit_global_pagepool(struct gk20a *g,
646 struct channel_ctx_gk20a *ch_ctx, 660 struct nvgpu_gr_ctx *ch_ctx,
647 u64 addr, u32 size, bool patch); 661 u64 addr, u32 size, bool patch);
648void gk20a_gr_set_shader_exceptions(struct gk20a *g, u32 data); 662void gk20a_gr_set_shader_exceptions(struct gk20a *g, u32 data);
649void gr_gk20a_enable_hww_exceptions(struct gk20a *g); 663void gr_gk20a_enable_hww_exceptions(struct gk20a *g);
@@ -694,10 +708,10 @@ int gr_gk20a_submit_fecs_method_op(struct gk20a *g,
694int gr_gk20a_submit_fecs_sideband_method_op(struct gk20a *g, 708int gr_gk20a_submit_fecs_sideband_method_op(struct gk20a *g,
695 struct fecs_method_op_gk20a op); 709 struct fecs_method_op_gk20a op);
696int gr_gk20a_alloc_gr_ctx(struct gk20a *g, 710int gr_gk20a_alloc_gr_ctx(struct gk20a *g,
697 struct gr_ctx_desc **__gr_ctx, struct vm_gk20a *vm, 711 struct nvgpu_gr_ctx *gr_ctx, struct vm_gk20a *vm,
698 u32 class, u32 padding); 712 u32 class, u32 padding);
699void gr_gk20a_free_gr_ctx(struct gk20a *g, 713void gr_gk20a_free_gr_ctx(struct gk20a *g,
700 struct vm_gk20a *vm, struct gr_ctx_desc *gr_ctx); 714 struct vm_gk20a *vm, struct nvgpu_gr_ctx *gr_ctx);
701int gr_gk20a_halt_pipe(struct gk20a *g); 715int gr_gk20a_halt_pipe(struct gk20a *g);
702 716
703#if defined(CONFIG_GK20A_CYCLE_STATS) 717#if defined(CONFIG_GK20A_CYCLE_STATS)