diff options
Diffstat (limited to 'drivers/gpu/nvgpu/gk20a/mm_gk20a.h')
-rw-r--r-- | drivers/gpu/nvgpu/gk20a/mm_gk20a.h | 190 |
1 files changed, 190 insertions, 0 deletions
diff --git a/drivers/gpu/nvgpu/gk20a/mm_gk20a.h b/drivers/gpu/nvgpu/gk20a/mm_gk20a.h new file mode 100644 index 00000000..2478ee1f --- /dev/null +++ b/drivers/gpu/nvgpu/gk20a/mm_gk20a.h | |||
@@ -0,0 +1,190 @@ | |||
1 | /* | ||
2 | * GK20A memory management | ||
3 | * | ||
4 | * Copyright (c) 2011-2017, NVIDIA CORPORATION. All rights reserved. | ||
5 | * | ||
6 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
7 | * copy of this software and associated documentation files (the "Software"), | ||
8 | * to deal in the Software without restriction, including without limitation | ||
9 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
10 | * and/or sell copies of the Software, and to permit persons to whom the | ||
11 | * Software is furnished to do so, subject to the following conditions: | ||
12 | * | ||
13 | * The above copyright notice and this permission notice shall be included in | ||
14 | * all copies or substantial portions of the Software. | ||
15 | * | ||
16 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
17 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
18 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
19 | * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER | ||
20 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING | ||
21 | * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER | ||
22 | * DEALINGS IN THE SOFTWARE. | ||
23 | */ | ||
24 | |||
25 | #ifndef MM_GK20A_H | ||
26 | #define MM_GK20A_H | ||
27 | |||
28 | #include <nvgpu/nvgpu_mem.h> | ||
29 | #include <nvgpu/allocator.h> | ||
30 | #include <nvgpu/vm.h> | ||
31 | #include <nvgpu/list.h> | ||
32 | #include <nvgpu/rbtree.h> | ||
33 | #include <nvgpu/kref.h> | ||
34 | |||
/*
 * Backing store and SW bookkeeping for a channel's GPFIFO ring.
 * 'get'/'put' mirror the ring's consumer/producer indices; 'wrap'
 * tracks put-pointer wrap-around (presumably for free-space math —
 * confirm against the channel submit path).
 */
struct gpfifo_desc {
	struct nvgpu_mem mem;	/* memory backing the gpfifo entries */
	u32 entry_num;		/* capacity of the ring, in entries */

	u32 get;		/* ring get (consumer) index */
	u32 put;		/* ring put (producer) index */

	bool wrap;		/* set when 'put' has wrapped the ring */

	/* if gpfifo lives in vidmem or is forced to go via PRAMIN, first copy
	 * from userspace to pipe and then from pipe to gpu buffer */
	void *pipe;
};
48 | |||
/*
 * Patch buffer: memory plus a count of data words written into it.
 * NOTE(review): presumably the per-channel gr ctx patch buffer —
 * confirm at the gr context setup call sites.
 */
struct patch_desc {
	struct nvgpu_mem mem;	/* backing memory for patch entries */
	u32 data_count;		/* number of data words currently stored */
};
53 | |||
/* ZCULL context state: GPU mapping address plus attribute/mode words. */
struct zcull_ctx_desc {
	u64 gpu_va;		/* GPU virtual address of the zcull ctx buffer */
	u32 ctx_attr;		/* zcull context attribute word */
	u32 ctx_sw_mode;	/* zcull context SW mode selector */
};
59 | |||
/* Perfmon (PM) context buffer and its configured mode. */
struct pm_ctx_desc {
	struct nvgpu_mem mem;	/* backing memory for the PM context */
	u32 pm_mode;		/* PM mode word written to HW — TODO confirm */
};
64 | |||
/* Compression-bit (compbit) backing store. */
struct compbit_store_desc {
	struct nvgpu_mem mem;	/* backing memory for the compbit store */

	/* The value that is written to the hardware. This depends on
	 * the number of ltcs and is not an address. */
	u64 base_hw;
};
72 | |||
/*
 * Per-surface compression/ZBC state, linked off its dma-buf. One
 * dma-buf may carry several of these (one per surface offset).
 */
struct gk20a_buffer_state {
	struct nvgpu_list_node list;	/* linkage; see gk20a_buffer_state_from_list() */

	/* The valid compbits and the fence must be changed atomically. */
	struct nvgpu_mutex lock;

	/* Offset of the surface within the dma-buf whose state is
	 * described by this struct (one dma-buf can contain multiple
	 * surfaces with different states). */
	size_t offset;

	/* A bitmask of valid sets of compbits (0 = uncompressed). */
	u32 valid_compbits;

	/* The ZBC color used on this buffer. */
	u32 zbc_color;

	/* This struct reflects the state of the buffer when this
	 * fence signals. */
	struct gk20a_fence *fence;
};
94 | |||
95 | static inline struct gk20a_buffer_state * | ||
96 | gk20a_buffer_state_from_list(struct nvgpu_list_node *node) | ||
97 | { | ||
98 | return (struct gk20a_buffer_state *) | ||
99 | ((uintptr_t)node - offsetof(struct gk20a_buffer_state, list)); | ||
100 | }; | ||
101 | |||
/*
 * Ring of privileged (kernel-generated) commands; entries are carved
 * out of it as priv_cmd_entry records.
 */
struct priv_cmd_queue {
	struct nvgpu_mem mem;	/* backing memory for the queue */
	u32 size;	/* num of entries in words */
	u32 put;	/* put for priv cmd queue */
	u32 get;	/* get for priv cmd queue */
};
108 | |||
/* One command span allocated out of a priv_cmd_queue. */
struct priv_cmd_entry {
	bool valid;		/* entry has been filled in and is usable */
	struct nvgpu_mem *mem;	/* queue memory this entry lives in */
	u32 off;	/* offset in mem, in u32 entries */
	u64 gva;	/* GPU virtual address of the entry */
	u32 get;	/* start of entry in queue */
	u32 size;	/* in words */
};
117 | |||
struct gk20a;
struct channel_gk20a;

/* FB flush and L2/CBC maintenance operations. */
int gk20a_mm_fb_flush(struct gk20a *g);
void gk20a_mm_l2_flush(struct gk20a *g, bool invalidate);
void gk20a_mm_cbc_clean(struct gk20a *g);
void gk20a_mm_l2_invalidate(struct gk20a *g);
/*
 * Resolve the struct device owning a VM via its mm -> gk20a back
 * pointers. Fix: the macro argument is now parenthesized so the
 * expansion stays correct when callers pass a non-trivial expression
 * (e.g. a cast or conditional) rather than a plain identifier.
 */
#define dev_from_vm(vm) dev_from_gk20a((vm)->mm->g)
127 | |||
/* LTC interrupt service routine. */
void gk20a_mm_ltc_isr(struct gk20a *g);

/* Query whether MMU debug mode is currently enabled. */
bool gk20a_mm_mmu_debug_mode_enabled(struct gk20a *g);

/* Fetch VPR info; returns 0 on success, negative on error — TODO confirm. */
int gk20a_mm_mmu_vpr_info_fetch(struct gk20a *g);

/* Instance block allocation/initialization and MM HW bring-up. */
int gk20a_alloc_inst_block(struct gk20a *g, struct nvgpu_mem *inst_block);
void gk20a_init_inst_block(struct nvgpu_mem *inst_block, struct vm_gk20a *vm,
		u32 big_page_size);
int gk20a_init_mm_setup_hw(struct gk20a *g);
138 | |||
/*
 * Map a scatter-gather table into a VM's GMMU and return the GPU VA
 * (0 presumably signals failure — confirm in mm_gk20a.c). "locked"
 * suggests the caller must already hold the VM's update lock; verify
 * against the implementation before relying on it.
 */
u64 gk20a_locked_gmmu_map(struct vm_gk20a *vm,
		u64 map_offset,		/* fixed GPU VA, or 0 to allocate — TODO confirm */
		struct nvgpu_sgt *sgt,
		u64 buffer_offset,
		u64 size,
		int pgsz_idx,		/* page-size index into the VM's size tables */
		u8 kind_v,		/* kind (memory format) value */
		u32 ctag_offset,	/* comptag offset */
		u32 flags,
		int rw_flag,
		bool clear_ctags,
		bool sparse,
		bool priv,		/* map as privileged */
		struct vm_gk20a_mapping_batch *batch,
		enum nvgpu_aperture aperture);

/* Undo a gk20a_locked_gmmu_map(); same locking expectation as above. */
void gk20a_locked_gmmu_unmap(struct vm_gk20a *vm,
		u64 vaddr,
		u64 size,
		int pgsz_idx,
		bool va_allocated,	/* whether the VA came from the VM allocator */
		int rw_flag,
		bool sparse,
		struct vm_gk20a_mapping_batch *batch);
163 | |||
/* vm-as interface */
struct nvgpu_as_alloc_space_args;
struct nvgpu_as_free_space_args;
int gk20a_vm_release_share(struct gk20a_as_share *as_share);
int gk20a_vm_bind_channel(struct gk20a_as_share *as_share,
			struct channel_gk20a *ch);
int __gk20a_vm_bind_channel(struct vm_gk20a *vm, struct channel_gk20a *ch);

/* Compute the inclusive [pde_lo, pde_hi] range covering a VA range. */
void pde_range_from_vaddr_range(struct vm_gk20a *vm,
					u64 addr_lo, u64 addr_hi,
					u32 *pde_lo, u32 *pde_hi);
int gk20a_mm_pde_coverage_bit_count(struct vm_gk20a *vm);
u32 gk20a_mm_get_iommu_bit(struct gk20a *g);

/* MMU level table selection keyed by big page size (see externs below). */
const struct gk20a_mmu_level *gk20a_mm_get_mmu_levels(struct gk20a *g,
						      u32 big_page_size);
void gk20a_mm_init_pdb(struct gk20a *g, struct nvgpu_mem *mem,
		struct vm_gk20a *vm);

/* Page-table layout descriptors for 64K and 128K big pages. */
extern const struct gk20a_mmu_level gk20a_mm_levels_64k[];
extern const struct gk20a_mmu_level gk20a_mm_levels_128k[];

/* Page-size lookups for a PDE/PTE at index pd_idx within pd. */
enum gmmu_pgsz_gk20a gk20a_get_pde_pgsz(struct gk20a *g,
					struct nvgpu_gmmu_pd *pd, u32 pd_idx);
enum gmmu_pgsz_gk20a gk20a_get_pte_pgsz(struct gk20a *g,
					struct nvgpu_gmmu_pd *pd, u32 pd_idx);
#endif /* MM_GK20A_H */