author    Dave Airlie <airlied@redhat.com>  2016-03-07 19:51:14 -0500
committer Dave Airlie <airlied@redhat.com>  2016-03-07 19:51:14 -0500
commit    984fee64355bf5384319e2ef31f0b03273629799 (patch)
tree      3d7aff6c2d0fad0b254b4e4894609e9a137c4121 /drivers
parent    507d44a9e1bb01661c75b88fd866d2461ab41c9c (diff)
parent    8779aa8f8b7fa397a0abe9e6af3334ea41e15836 (diff)
Merge branch 'drm-etnaviv-next' of git://git.pengutronix.de/git/lst/linux into drm-next
Notable changes:
- correctness fixes to the GPU cache flushing when switching execution
state and when powering down the GPU
- reduction of time spent in hardirq-off context
- placement improvements to the GPU DMA linear window, allowing the
driver to properly work on i.MX6 systems with more than 2GB of RAM
* 'drm-etnaviv-next' of git://git.pengutronix.de/git/lst/linux:
drm: etnaviv: clean up submit_bo()
drm: etnaviv: clean up vram_mapping submission/retire path
drm: etnaviv: improve readability of command insertion to ring buffer
drm: etnaviv: clean up GPU command submission
drm: etnaviv: use previous GPU pipe state when pipe switching
drm: etnaviv: flush all GPU caches when stopping GPU
drm: etnaviv: track current execution state
drm: etnaviv: extract arming of semaphore
drm: etnaviv: extract replacement of WAIT command
drm: etnaviv: extract command ring reservation
drm/etnaviv: move GPU linear window to end of DMA window
drm/etnaviv: move runtime PM balance into retire worker
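
Several of the cleanups above hinge on one small helper that reserves contiguous space in the kernel ring buffer and wraps the write pointer when the tail cannot hold the request. Below is a minimal user-space model of that logic, shadowing the names of etnaviv_buffer_reserve() from the diff further down; it is an illustration, not the driver code itself:

#include <stdint.h>

struct ring {
        uint32_t gpu_base;   /* GPU virtual address of the ring buffer */
        uint32_t user_size;  /* current tail position, in bytes */
        uint32_t size;       /* total ring size, in bytes */
};

/*
 * Commands are 64-bit words; if the tail cannot hold 'cmd_dwords' of
 * them contiguously, wrap to the start of the ring. Returns the GPU
 * address where the commands will land.
 */
static uint32_t ring_reserve(struct ring *r, unsigned int cmd_dwords)
{
        if (r->user_size + cmd_dwords * sizeof(uint64_t) > r->size)
                r->user_size = 0;

        return r->gpu_base + r->user_size;
}

Reserving first and only then writing commands is what lets the later WAIT rewrite hand the front end a complete, contiguous block.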
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/gpu/drm/etnaviv/etnaviv_buffer.c      | 219
-rw-r--r--  drivers/gpu/drm/etnaviv/etnaviv_drv.h         |   3
-rw-r--r--  drivers/gpu/drm/etnaviv/etnaviv_gem.c         |  54
-rw-r--r--  drivers/gpu/drm/etnaviv/etnaviv_gem.h         |  17
-rw-r--r--  drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c  |  36
-rw-r--r--  drivers/gpu/drm/etnaviv/etnaviv_gpu.c         |  54
-rw-r--r--  drivers/gpu/drm/etnaviv/etnaviv_gpu.h         |   4
-rw-r--r--  drivers/gpu/drm/etnaviv/etnaviv_mmu.c         |   2
-rw-r--r--  drivers/gpu/drm/etnaviv/state_3d.xml.h        |   9
9 files changed, 239 insertions(+), 159 deletions(-)
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_buffer.c b/drivers/gpu/drm/etnaviv/etnaviv_buffer.c
index 332c55ebba6d..d8d556457427 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_buffer.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_buffer.c
@@ -21,6 +21,7 @@
 
 #include "common.xml.h"
 #include "state.xml.h"
+#include "state_3d.xml.h"
 #include "cmdstream.xml.h"
 
 /*
@@ -85,10 +86,17 @@ static inline void CMD_STALL(struct etnaviv_cmdbuf *buffer,
 	OUT(buffer, VIV_FE_STALL_TOKEN_FROM(from) | VIV_FE_STALL_TOKEN_TO(to));
 }
 
-static void etnaviv_cmd_select_pipe(struct etnaviv_cmdbuf *buffer, u8 pipe)
+static inline void CMD_SEM(struct etnaviv_cmdbuf *buffer, u32 from, u32 to)
 {
-	u32 flush;
-	u32 stall;
+	CMD_LOAD_STATE(buffer, VIVS_GL_SEMAPHORE_TOKEN,
+		       VIVS_GL_SEMAPHORE_TOKEN_FROM(from) |
+		       VIVS_GL_SEMAPHORE_TOKEN_TO(to));
+}
+
+static void etnaviv_cmd_select_pipe(struct etnaviv_gpu *gpu,
+	struct etnaviv_cmdbuf *buffer, u8 pipe)
+{
+	u32 flush = 0;
 
 	/*
 	 * This assumes that if we're switching to 2D, we're switching
@@ -96,17 +104,13 @@ static void etnaviv_cmd_select_pipe(struct etnaviv_cmdbuf *buffer, u8 pipe)
 	 * the 2D core, we need to flush the 3D depth and color caches,
 	 * otherwise we need to flush the 2D pixel engine cache.
 	 */
-	if (pipe == ETNA_PIPE_2D)
-		flush = VIVS_GL_FLUSH_CACHE_DEPTH | VIVS_GL_FLUSH_CACHE_COLOR;
-	else
+	if (gpu->exec_state == ETNA_PIPE_2D)
 		flush = VIVS_GL_FLUSH_CACHE_PE2D;
-
-	stall = VIVS_GL_SEMAPHORE_TOKEN_FROM(SYNC_RECIPIENT_FE) |
-		VIVS_GL_SEMAPHORE_TOKEN_TO(SYNC_RECIPIENT_PE);
+	else if (gpu->exec_state == ETNA_PIPE_3D)
+		flush = VIVS_GL_FLUSH_CACHE_DEPTH | VIVS_GL_FLUSH_CACHE_COLOR;
 
 	CMD_LOAD_STATE(buffer, VIVS_GL_FLUSH_CACHE, flush);
-	CMD_LOAD_STATE(buffer, VIVS_GL_SEMAPHORE_TOKEN, stall);
-
+	CMD_SEM(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_PE);
 	CMD_STALL(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_PE);
 
 	CMD_LOAD_STATE(buffer, VIVS_GL_PIPE_SELECT,
@@ -131,6 +135,36 @@ static void etnaviv_buffer_dump(struct etnaviv_gpu *gpu,
 			ptr, len * 4, 0);
 }
 
+/*
+ * Safely replace the WAIT of a waitlink with a new command and argument.
+ * The GPU may be executing this WAIT while we're modifying it, so we have
+ * to write it in a specific order to avoid the GPU branching to somewhere
+ * else. 'wl_offset' is the offset to the first byte of the WAIT command.
+ */
+static void etnaviv_buffer_replace_wait(struct etnaviv_cmdbuf *buffer,
+	unsigned int wl_offset, u32 cmd, u32 arg)
+{
+	u32 *lw = buffer->vaddr + wl_offset;
+
+	lw[1] = arg;
+	mb();
+	lw[0] = cmd;
+	mb();
+}
+
+/*
+ * Ensure that there is space in the command buffer to contiguously write
+ * 'cmd_dwords' 64-bit words into the buffer, wrapping if necessary.
+ */
+static u32 etnaviv_buffer_reserve(struct etnaviv_gpu *gpu,
+	struct etnaviv_cmdbuf *buffer, unsigned int cmd_dwords)
+{
+	if (buffer->user_size + cmd_dwords * sizeof(u64) > buffer->size)
+		buffer->user_size = 0;
+
+	return gpu_va(gpu, buffer) + buffer->user_size;
+}
+
 u16 etnaviv_buffer_init(struct etnaviv_gpu *gpu)
 {
 	struct etnaviv_cmdbuf *buffer = gpu->buffer;
@@ -147,81 +181,79 @@ u16 etnaviv_buffer_init(struct etnaviv_gpu *gpu)
 void etnaviv_buffer_end(struct etnaviv_gpu *gpu)
 {
 	struct etnaviv_cmdbuf *buffer = gpu->buffer;
+	unsigned int waitlink_offset = buffer->user_size - 16;
+	u32 link_target, flush = 0;
 
-	/* Replace the last WAIT with an END */
-	buffer->user_size -= 16;
-
-	CMD_END(buffer);
-	mb();
+	if (gpu->exec_state == ETNA_PIPE_2D)
+		flush = VIVS_GL_FLUSH_CACHE_PE2D;
+	else if (gpu->exec_state == ETNA_PIPE_3D)
+		flush = VIVS_GL_FLUSH_CACHE_DEPTH |
+			VIVS_GL_FLUSH_CACHE_COLOR |
+			VIVS_GL_FLUSH_CACHE_TEXTURE |
+			VIVS_GL_FLUSH_CACHE_TEXTUREVS |
+			VIVS_GL_FLUSH_CACHE_SHADER_L2;
+
+	if (flush) {
+		unsigned int dwords = 7;
+
+		link_target = etnaviv_buffer_reserve(gpu, buffer, dwords);
+
+		CMD_SEM(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_PE);
+		CMD_STALL(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_PE);
+		CMD_LOAD_STATE(buffer, VIVS_GL_FLUSH_CACHE, flush);
+		if (gpu->exec_state == ETNA_PIPE_3D)
+			CMD_LOAD_STATE(buffer, VIVS_TS_FLUSH_CACHE,
+				       VIVS_TS_FLUSH_CACHE_FLUSH);
+		CMD_SEM(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_PE);
+		CMD_STALL(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_PE);
+		CMD_END(buffer);
+
+		etnaviv_buffer_replace_wait(buffer, waitlink_offset,
+					    VIV_FE_LINK_HEADER_OP_LINK |
+					    VIV_FE_LINK_HEADER_PREFETCH(dwords),
+					    link_target);
+	} else {
+		/* Replace the last link-wait with an "END" command */
+		etnaviv_buffer_replace_wait(buffer, waitlink_offset,
+					    VIV_FE_END_HEADER_OP_END, 0);
+	}
 }
 
+/* Append a command buffer to the ring buffer. */
 void etnaviv_buffer_queue(struct etnaviv_gpu *gpu, unsigned int event,
 	struct etnaviv_cmdbuf *cmdbuf)
 {
 	struct etnaviv_cmdbuf *buffer = gpu->buffer;
-	u32 *lw = buffer->vaddr + buffer->user_size - 16;
-	u32 back, link_target, link_size, reserve_size, extra_size = 0;
+	unsigned int waitlink_offset = buffer->user_size - 16;
+	u32 return_target, return_dwords;
+	u32 link_target, link_dwords;
 
 	if (drm_debug & DRM_UT_DRIVER)
 		etnaviv_buffer_dump(gpu, buffer, 0, 0x50);
 
+	link_target = gpu_va(gpu, cmdbuf);
+	link_dwords = cmdbuf->size / 8;
+
 	/*
-	 * If we need to flush the MMU prior to submitting this buffer, we
-	 * will need to append a mmu flush load state, followed by a new
+	 * If we need maintenance prior to submitting this buffer, we will
+	 * need to append a mmu flush load state, followed by a new
 	 * link to this buffer - a total of four additional words.
 	 */
 	if (gpu->mmu->need_flush || gpu->switch_context) {
+		u32 target, extra_dwords;
+
 		/* link command */
-		extra_size += 2;
+		extra_dwords = 1;
+
 		/* flush command */
 		if (gpu->mmu->need_flush)
-			extra_size += 2;
+			extra_dwords += 1;
+
 		/* pipe switch commands */
 		if (gpu->switch_context)
-			extra_size += 8;
-	}
+			extra_dwords += 4;
 
-	reserve_size = (6 + extra_size) * 4;
-
-	/*
-	 * if we are going to completely overflow the buffer, we need to wrap.
-	 */
-	if (buffer->user_size + reserve_size > buffer->size)
-		buffer->user_size = 0;
-
-	/* save offset back into main buffer */
-	back = buffer->user_size + reserve_size - 6 * 4;
-	link_target = gpu_va(gpu, buffer) + buffer->user_size;
-	link_size = 6;
-
-	/* Skip over any extra instructions */
-	link_target += extra_size * sizeof(u32);
-
-	if (drm_debug & DRM_UT_DRIVER)
-		pr_info("stream link to 0x%08x @ 0x%08x %p\n",
-			link_target, gpu_va(gpu, cmdbuf), cmdbuf->vaddr);
-
-	/* jump back from cmd to main buffer */
-	CMD_LINK(cmdbuf, link_size, link_target);
-
-	link_target = gpu_va(gpu, cmdbuf);
-	link_size = cmdbuf->size / 8;
-
-
-
-	if (drm_debug & DRM_UT_DRIVER) {
-		print_hex_dump(KERN_INFO, "cmd ", DUMP_PREFIX_OFFSET, 16, 4,
-			       cmdbuf->vaddr, cmdbuf->size, 0);
-
-		pr_info("link op: %p\n", lw);
-		pr_info("link addr: %p\n", lw + 1);
-		pr_info("addr: 0x%08x\n", link_target);
-		pr_info("back: 0x%08x\n", gpu_va(gpu, buffer) + back);
-		pr_info("event: %d\n", event);
-	}
-
-	if (gpu->mmu->need_flush || gpu->switch_context) {
-		u32 new_target = gpu_va(gpu, buffer) + buffer->user_size;
+		target = etnaviv_buffer_reserve(gpu, buffer, extra_dwords);
 
 		if (gpu->mmu->need_flush) {
 			/* Add the MMU flush */
@@ -236,32 +268,59 @@ void etnaviv_buffer_queue(struct etnaviv_gpu *gpu, unsigned int event,
 		}
 
 		if (gpu->switch_context) {
-			etnaviv_cmd_select_pipe(buffer, cmdbuf->exec_state);
+			etnaviv_cmd_select_pipe(gpu, buffer, cmdbuf->exec_state);
+			gpu->exec_state = cmdbuf->exec_state;
 			gpu->switch_context = false;
 		}
 
-		/* And the link to the first buffer */
-		CMD_LINK(buffer, link_size, link_target);
+		/* And the link to the submitted buffer */
+		CMD_LINK(buffer, link_dwords, link_target);
 
 		/* Update the link target to point to above instructions */
-		link_target = new_target;
-		link_size = extra_size;
+		link_target = target;
+		link_dwords = extra_dwords;
 	}
 
-	/* trigger event */
+	/*
+	 * Append a LINK to the submitted command buffer to return to
+	 * the ring buffer. return_target is the ring target address.
+	 * We need three dwords: event, wait, link.
+	 */
+	return_dwords = 3;
+	return_target = etnaviv_buffer_reserve(gpu, buffer, return_dwords);
+	CMD_LINK(cmdbuf, return_dwords, return_target);
+
+	/*
+	 * Append event, wait and link pointing back to the wait
+	 * command to the ring buffer.
+	 */
 	CMD_LOAD_STATE(buffer, VIVS_GL_EVENT, VIVS_GL_EVENT_EVENT_ID(event) |
 		       VIVS_GL_EVENT_FROM_PE);
-
-	/* append WAIT/LINK to main buffer */
 	CMD_WAIT(buffer);
-	CMD_LINK(buffer, 2, gpu_va(gpu, buffer) + (buffer->user_size - 4));
+	CMD_LINK(buffer, 2, return_target + 8);
 
-	/* Change WAIT into a LINK command; write the address first. */
-	*(lw + 1) = link_target;
-	mb();
-	*(lw) = VIV_FE_LINK_HEADER_OP_LINK |
-		VIV_FE_LINK_HEADER_PREFETCH(link_size);
-	mb();
+	if (drm_debug & DRM_UT_DRIVER)
+		pr_info("stream link to 0x%08x @ 0x%08x %p\n",
+			return_target, gpu_va(gpu, cmdbuf), cmdbuf->vaddr);
+
+	if (drm_debug & DRM_UT_DRIVER) {
+		print_hex_dump(KERN_INFO, "cmd ", DUMP_PREFIX_OFFSET, 16, 4,
+			       cmdbuf->vaddr, cmdbuf->size, 0);
+
+		pr_info("link op: %p\n", buffer->vaddr + waitlink_offset);
+		pr_info("addr: 0x%08x\n", link_target);
+		pr_info("back: 0x%08x\n", return_target);
+		pr_info("event: %d\n", event);
+	}
+
+	/*
+	 * Kick off the submitted command by replacing the previous
+	 * WAIT with a link to the address in the ring buffer.
+	 */
+	etnaviv_buffer_replace_wait(buffer, waitlink_offset,
+				    VIV_FE_LINK_HEADER_OP_LINK |
+				    VIV_FE_LINK_HEADER_PREFETCH(link_dwords),
+				    link_target);
 
 	if (drm_debug & DRM_UT_DRIVER)
 		etnaviv_buffer_dump(gpu, buffer, 0, 0x50);
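
The write ordering in etnaviv_buffer_replace_wait() above is the linchpin of this lock-free kick-off: the front end may re-fetch the WAIT at any moment, so the new argument must be visible before the opcode flips. A standalone model of the pattern (illustrative only; gpu_visible_write_barrier() is a stand-in for the kernel's mb()):

#include <stdint.h>

/* Stand-in for the kernel's mb(); orders writes as seen by the GPU. */
static void gpu_visible_write_barrier(void)
{
        __sync_synchronize();
}

/*
 * slot[0] is the command header the FE keeps re-fetching, slot[1] its
 * argument. Publishing the argument first means the FE only ever sees
 * either the old WAIT (harmless) or the complete new command - never
 * a LINK with a stale target.
 */
static void replace_live_command(volatile uint32_t *slot, uint32_t cmd,
                                 uint32_t arg)
{
        slot[1] = arg;                  /* new argument first */
        gpu_visible_write_barrier();
        slot[0] = cmd;                  /* then flip the opcode */
        gpu_visible_write_barrier();
}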
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_drv.h b/drivers/gpu/drm/etnaviv/etnaviv_drv.h
index 1cd6046e76b1..115c5bc6d7c8 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_drv.h
+++ b/drivers/gpu/drm/etnaviv/etnaviv_drv.h
@@ -75,9 +75,6 @@ int etnaviv_ioctl_gem_submit(struct drm_device *dev, void *data,
 int etnaviv_gem_mmap(struct file *filp, struct vm_area_struct *vma);
 int etnaviv_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
 int etnaviv_gem_mmap_offset(struct drm_gem_object *obj, u64 *offset);
-int etnaviv_gem_get_iova(struct etnaviv_gpu *gpu,
-	struct drm_gem_object *obj, u32 *iova);
-void etnaviv_gem_put_iova(struct etnaviv_gpu *gpu, struct drm_gem_object *obj);
 struct sg_table *etnaviv_gem_prime_get_sg_table(struct drm_gem_object *obj);
 void *etnaviv_gem_prime_vmap(struct drm_gem_object *obj);
 void etnaviv_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr);
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem.c b/drivers/gpu/drm/etnaviv/etnaviv_gem.c
index 4b519e4309b2..937a77520f58 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gem.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gem.c
@@ -260,8 +260,32 @@ etnaviv_gem_get_vram_mapping(struct etnaviv_gem_object *obj,
 	return NULL;
 }
 
-int etnaviv_gem_get_iova(struct etnaviv_gpu *gpu,
-	struct drm_gem_object *obj, u32 *iova)
+void etnaviv_gem_mapping_reference(struct etnaviv_vram_mapping *mapping)
+{
+	struct etnaviv_gem_object *etnaviv_obj = mapping->object;
+
+	drm_gem_object_reference(&etnaviv_obj->base);
+
+	mutex_lock(&etnaviv_obj->lock);
+	WARN_ON(mapping->use == 0);
+	mapping->use += 1;
+	mutex_unlock(&etnaviv_obj->lock);
+}
+
+void etnaviv_gem_mapping_unreference(struct etnaviv_vram_mapping *mapping)
+{
+	struct etnaviv_gem_object *etnaviv_obj = mapping->object;
+
+	mutex_lock(&etnaviv_obj->lock);
+	WARN_ON(mapping->use == 0);
+	mapping->use -= 1;
+	mutex_unlock(&etnaviv_obj->lock);
+
+	drm_gem_object_unreference_unlocked(&etnaviv_obj->base);
+}
+
+struct etnaviv_vram_mapping *etnaviv_gem_mapping_get(
+	struct drm_gem_object *obj, struct etnaviv_gpu *gpu)
 {
 	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
 	struct etnaviv_vram_mapping *mapping;
@@ -329,28 +353,12 @@ int etnaviv_gem_get_iova(struct etnaviv_gpu *gpu,
 out:
 	mutex_unlock(&etnaviv_obj->lock);
 
-	if (!ret) {
-		/* Take a reference on the object */
-		drm_gem_object_reference(obj);
-		*iova = mapping->iova;
-	}
-
-	return ret;
-}
-
-void etnaviv_gem_put_iova(struct etnaviv_gpu *gpu, struct drm_gem_object *obj)
-{
-	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
-	struct etnaviv_vram_mapping *mapping;
-
-	mutex_lock(&etnaviv_obj->lock);
-	mapping = etnaviv_gem_get_vram_mapping(etnaviv_obj, gpu->mmu);
-
-	WARN_ON(mapping->use == 0);
-	mapping->use -= 1;
-	mutex_unlock(&etnaviv_obj->lock);
+	if (ret)
+		return ERR_PTR(ret);
 
-	drm_gem_object_unreference_unlocked(obj);
+	/* Take a reference on the object */
+	drm_gem_object_reference(obj);
+	return mapping;
 }
 
 void *etnaviv_gem_vmap(struct drm_gem_object *obj)
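
The new API above replaces the per-call iova lookup with an explicitly refcounted mapping object whose use count is only touched under the object lock. A minimal user-space model of that discipline (names are illustrative; the real users are submit_pin_objects(), etnaviv_gpu_submit() and retire_worker() in the diffs below):

#include <stdint.h>
#include <assert.h>
#include <pthread.h>

struct vram_mapping {
        pthread_mutex_t lock;   /* models the owning object's lock */
        unsigned int use;       /* pin count; nonzero while mapped */
        uint32_t iova;          /* GPU address handed to relocations */
};

static void mapping_reference(struct vram_mapping *m)
{
        pthread_mutex_lock(&m->lock);
        assert(m->use != 0);    /* models the kernel's WARN_ON() */
        m->use++;
        pthread_mutex_unlock(&m->lock);
}

static void mapping_unreference(struct vram_mapping *m)
{
        pthread_mutex_lock(&m->lock);
        assert(m->use != 0);
        m->use--;
        pthread_mutex_unlock(&m->lock);
}

Keeping the count under the object lock is what lets the retire worker drop the cmdbuf's reference without racing a concurrent pin from a new submit.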
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem.h b/drivers/gpu/drm/etnaviv/etnaviv_gem.h
index ab5df8147a5f..02665d8c10ee 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gem.h
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gem.h
@@ -88,6 +88,12 @@ static inline bool is_active(struct etnaviv_gem_object *etnaviv_obj)
 
 #define MAX_CMDS 4
 
+struct etnaviv_gem_submit_bo {
+	u32 flags;
+	struct etnaviv_gem_object *obj;
+	struct etnaviv_vram_mapping *mapping;
+};
+
 /* Created per submit-ioctl, to track bo's and cmdstream bufs, etc,
  * associated with the cmdstream submission for synchronization (and
  * make it easier to unwind when things go wrong, etc). This only
@@ -99,11 +105,7 @@ struct etnaviv_gem_submit {
 	struct ww_acquire_ctx ticket;
 	u32 fence;
 	unsigned int nr_bos;
-	struct {
-		u32 flags;
-		struct etnaviv_gem_object *obj;
-		u32 iova;
-	} bos[0];
+	struct etnaviv_gem_submit_bo bos[0];
 };
 
 int etnaviv_gem_wait_bo(struct etnaviv_gpu *gpu, struct drm_gem_object *obj,
@@ -115,4 +117,9 @@ int etnaviv_gem_obj_add(struct drm_device *dev, struct drm_gem_object *obj);
 struct page **etnaviv_gem_get_pages(struct etnaviv_gem_object *obj);
 void etnaviv_gem_put_pages(struct etnaviv_gem_object *obj);
 
+struct etnaviv_vram_mapping *etnaviv_gem_mapping_get(
+	struct drm_gem_object *obj, struct etnaviv_gpu *gpu);
+void etnaviv_gem_mapping_reference(struct etnaviv_vram_mapping *mapping);
+void etnaviv_gem_mapping_unreference(struct etnaviv_vram_mapping *mapping);
+
 #endif /* __ETNAVIV_GEM_H__ */
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
index 1aba01a999df..236ada93df53 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
@@ -187,12 +187,10 @@ static void submit_unpin_objects(struct etnaviv_gem_submit *submit)
 	int i;
 
 	for (i = 0; i < submit->nr_bos; i++) {
-		struct etnaviv_gem_object *etnaviv_obj = submit->bos[i].obj;
-
 		if (submit->bos[i].flags & BO_PINNED)
-			etnaviv_gem_put_iova(submit->gpu, &etnaviv_obj->base);
+			etnaviv_gem_mapping_unreference(submit->bos[i].mapping);
 
-		submit->bos[i].iova = 0;
+		submit->bos[i].mapping = NULL;
 		submit->bos[i].flags &= ~BO_PINNED;
 	}
 }
@@ -203,22 +201,24 @@ static int submit_pin_objects(struct etnaviv_gem_submit *submit)
 
 	for (i = 0; i < submit->nr_bos; i++) {
 		struct etnaviv_gem_object *etnaviv_obj = submit->bos[i].obj;
-		u32 iova;
+		struct etnaviv_vram_mapping *mapping;
 
-		ret = etnaviv_gem_get_iova(submit->gpu, &etnaviv_obj->base,
-					   &iova);
-		if (ret)
+		mapping = etnaviv_gem_mapping_get(&etnaviv_obj->base,
+						  submit->gpu);
+		if (IS_ERR(mapping)) {
+			ret = PTR_ERR(mapping);
 			break;
+		}
 
 		submit->bos[i].flags |= BO_PINNED;
-		submit->bos[i].iova = iova;
+		submit->bos[i].mapping = mapping;
 	}
 
 	return ret;
 }
 
 static int submit_bo(struct etnaviv_gem_submit *submit, u32 idx,
-	struct etnaviv_gem_object **obj, u32 *iova)
+	struct etnaviv_gem_submit_bo **bo)
 {
 	if (idx >= submit->nr_bos) {
 		DRM_ERROR("invalid buffer index: %u (out of %u)\n",
@@ -226,10 +226,7 @@ static int submit_bo(struct etnaviv_gem_submit *submit, u32 idx,
 		return -EINVAL;
 	}
 
-	if (obj)
-		*obj = submit->bos[idx].obj;
-	if (iova)
-		*iova = submit->bos[idx].iova;
+	*bo = &submit->bos[idx];
 
 	return 0;
 }
@@ -245,8 +242,8 @@ static int submit_reloc(struct etnaviv_gem_submit *submit, void *stream,
 
 	for (i = 0; i < nr_relocs; i++) {
 		const struct drm_etnaviv_gem_submit_reloc *r = relocs + i;
-		struct etnaviv_gem_object *bobj;
-		u32 iova, off;
+		struct etnaviv_gem_submit_bo *bo;
+		u32 off;
 
 		if (unlikely(r->flags)) {
 			DRM_ERROR("invalid reloc flags\n");
@@ -268,17 +265,16 @@ static int submit_reloc(struct etnaviv_gem_submit *submit, void *stream,
 			return -EINVAL;
 		}
 
-		ret = submit_bo(submit, r->reloc_idx, &bobj, &iova);
+		ret = submit_bo(submit, r->reloc_idx, &bo);
 		if (ret)
 			return ret;
 
-		if (r->reloc_offset >=
-		    bobj->base.size - sizeof(*ptr)) {
+		if (r->reloc_offset >= bo->obj->base.size - sizeof(*ptr)) {
 			DRM_ERROR("relocation %u outside object", i);
 			return -EINVAL;
 		}
 
-		ptr[off] = iova + r->reloc_offset;
+		ptr[off] = bo->mapping->iova + r->reloc_offset;
 
 		last_offset = off;
 	}
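
With the submit-bo struct now carrying the mapping, a relocation resolves to mapping->iova plus the user-supplied offset. A worked example of that one patching step (hypothetical values; the real code validates the index and bounds first, as shown above):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint32_t stream[8] = { 0 };   /* copy of the user command stream */
        uint32_t iova = 0x10400000;   /* bo->mapping->iova after pinning */
        uint32_t reloc_offset = 0x80; /* r->reloc_offset into the BO */
        uint32_t off = 3;             /* patched dword within the stream */

        /* mirrors: ptr[off] = bo->mapping->iova + r->reloc_offset; */
        stream[off] = iova + reloc_offset;

        printf("patched dword %u to 0x%08x\n", off, stream[off]);
        return 0;
}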
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
index a33162cf4f4c..d13303ce530d 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
@@ -628,6 +628,7 @@ int etnaviv_gpu_init(struct etnaviv_gpu *gpu)
 	/* Now program the hardware */
 	mutex_lock(&gpu->lock);
 	etnaviv_gpu_hw_init(gpu);
+	gpu->exec_state = -1;
 	mutex_unlock(&gpu->lock);
 
 	pm_runtime_mark_last_busy(gpu->dev);
@@ -871,17 +872,13 @@ static void recover_worker(struct work_struct *work)
 		gpu->event[i].fence = NULL;
 		gpu->event[i].used = false;
 		complete(&gpu->event_free);
-		/*
-		 * Decrement the PM count for each stuck event. This is safe
-		 * even in atomic context as we use ASYNC RPM here.
-		 */
-		pm_runtime_put_autosuspend(gpu->dev);
 	}
 	spin_unlock_irqrestore(&gpu->event_spinlock, flags);
 	gpu->completed_fence = gpu->active_fence;
 
 	etnaviv_gpu_hw_init(gpu);
 	gpu->switch_context = true;
+	gpu->exec_state = -1;
 
 	mutex_unlock(&gpu->lock);
 	pm_runtime_mark_last_busy(gpu->dev);
@@ -1106,7 +1103,7 @@ struct etnaviv_cmdbuf *etnaviv_gpu_cmdbuf_new(struct etnaviv_gpu *gpu, u32 size,
 	size_t nr_bos)
 {
 	struct etnaviv_cmdbuf *cmdbuf;
-	size_t sz = size_vstruct(nr_bos, sizeof(cmdbuf->bo[0]),
+	size_t sz = size_vstruct(nr_bos, sizeof(cmdbuf->bo_map[0]),
 				 sizeof(*cmdbuf));
 
 	cmdbuf = kzalloc(sz, GFP_KERNEL);
@@ -1150,14 +1147,23 @@ static void retire_worker(struct work_struct *work)
 		fence_put(cmdbuf->fence);
 
 		for (i = 0; i < cmdbuf->nr_bos; i++) {
-			struct etnaviv_gem_object *etnaviv_obj = cmdbuf->bo[i];
+			struct etnaviv_vram_mapping *mapping = cmdbuf->bo_map[i];
+			struct etnaviv_gem_object *etnaviv_obj = mapping->object;
 
 			atomic_dec(&etnaviv_obj->gpu_active);
 			/* drop the refcount taken in etnaviv_gpu_submit */
-			etnaviv_gem_put_iova(gpu, &etnaviv_obj->base);
+			etnaviv_gem_mapping_unreference(mapping);
 		}
 
 		etnaviv_gpu_cmdbuf_free(cmdbuf);
+		/*
+		 * We need to balance the runtime PM count caused by
+		 * each submission. Upon submission, we increment
+		 * the runtime PM counter, and allocate one event.
+		 * So here, we put the runtime PM count for each
+		 * completed event.
+		 */
+		pm_runtime_put_autosuspend(gpu->dev);
 	}
 
 	gpu->retired_fence = fence;
@@ -1304,11 +1310,10 @@ int etnaviv_gpu_submit(struct etnaviv_gpu *gpu,
 
 	for (i = 0; i < submit->nr_bos; i++) {
 		struct etnaviv_gem_object *etnaviv_obj = submit->bos[i].obj;
-		u32 iova;
 
-		/* Each cmdbuf takes a refcount on the iova */
-		etnaviv_gem_get_iova(gpu, &etnaviv_obj->base, &iova);
-		cmdbuf->bo[i] = etnaviv_obj;
+		/* Each cmdbuf takes a refcount on the mapping */
+		etnaviv_gem_mapping_reference(submit->bos[i].mapping);
+		cmdbuf->bo_map[i] = submit->bos[i].mapping;
 		atomic_inc(&etnaviv_obj->gpu_active);
 
 		if (submit->bos[i].flags & ETNA_SUBMIT_BO_WRITE)
@@ -1378,15 +1383,6 @@ static irqreturn_t irq_handler(int irq, void *data)
 			gpu->completed_fence = fence->seqno;
 
 			event_free(gpu, event);
-
-			/*
-			 * We need to balance the runtime PM count caused by
-			 * each submission. Upon submission, we increment
-			 * the runtime PM counter, and allocate one event.
-			 * So here, we put the runtime PM count for each
-			 * completed event.
-			 */
-			pm_runtime_put_autosuspend(gpu->dev);
 		}
 
 		/* Retire the buffer objects in a work */
@@ -1481,6 +1477,7 @@ static int etnaviv_gpu_hw_resume(struct etnaviv_gpu *gpu)
 	etnaviv_gpu_hw_init(gpu);
 
 	gpu->switch_context = true;
+	gpu->exec_state = -1;
 
 	mutex_unlock(&gpu->lock);
 
@@ -1569,6 +1566,7 @@ static int etnaviv_gpu_platform_probe(struct platform_device *pdev)
 {
 	struct device *dev = &pdev->dev;
 	struct etnaviv_gpu *gpu;
+	u32 dma_mask;
 	int err = 0;
 
 	gpu = devm_kzalloc(dev, sizeof(*gpu), GFP_KERNEL);
@@ -1579,12 +1577,16 @@ static int etnaviv_gpu_platform_probe(struct platform_device *pdev)
 	mutex_init(&gpu->lock);
 
 	/*
-	 * Set the GPU base address to the start of physical memory. This
-	 * ensures that if we have up to 2GB, the v1 MMU can address the
-	 * highest memory. This is important as command buffers may be
-	 * allocated outside of this limit.
+	 * Set the GPU linear window to be at the end of the DMA window, where
+	 * the CMA area is likely to reside. This ensures that we are able to
+	 * map the command buffers while having the linear window overlap as
+	 * much RAM as possible, so we can optimize mappings for other buffers.
 	 */
-	gpu->memory_base = PHYS_OFFSET;
+	dma_mask = (u32)dma_get_required_mask(dev);
+	if (dma_mask < PHYS_OFFSET + SZ_2G)
+		gpu->memory_base = PHYS_OFFSET;
+	else
+		gpu->memory_base = dma_mask - SZ_2G + 1;
 
 	/* Map registers: */
 	gpu->mmio = etnaviv_ioremap(pdev, NULL, dev_name(gpu->dev));
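
The probe change above computes the placement of the GPU's 2 GB linear window from the required DMA mask. A worked example of that arithmetic (assuming a 32-bit DMA mask and an i.MX6-style PHYS_OFFSET of 0x10000000; illustrative, not driver code):

#include <stdint.h>
#include <stdio.h>

#define SZ_2G           0x80000000u
#define PHYS_OFFSET     0x10000000u  /* assumed i.MX6-style RAM base */

int main(void)
{
        /* e.g. a >2GB system whose RAM tops out at the 4 GB boundary */
        uint32_t dma_mask = 0xffffffffu;
        uint32_t memory_base;

        if (dma_mask < PHYS_OFFSET + SZ_2G)
                memory_base = PHYS_OFFSET;  /* window already covers all RAM */
        else
                memory_base = dma_mask - SZ_2G + 1;

        /* prints 0x80000000: the window spans the top 2 GB, where CMA lives */
        printf("memory_base = 0x%08x\n", memory_base);
        return 0;
}

Anchoring the window at the top of the DMA range keeps the CMA-allocated command buffers mappable while still overlapping as much RAM as possible.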
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gpu.h b/drivers/gpu/drm/etnaviv/etnaviv_gpu.h
index f233ac4c7c1c..f5321e2f25ff 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gpu.h
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gpu.h
@@ -23,6 +23,7 @@
 #include "etnaviv_drv.h"
 
 struct etnaviv_gem_submit;
+struct etnaviv_vram_mapping;
 
 struct etnaviv_chip_identity {
 	/* Chip model. */
@@ -103,6 +104,7 @@ struct etnaviv_gpu {
 
 	/* 'ring'-buffer: */
 	struct etnaviv_cmdbuf *buffer;
+	int exec_state;
 
 	/* bus base address of memory */
 	u32 memory_base;
@@ -166,7 +168,7 @@ struct etnaviv_cmdbuf {
 	struct list_head node;
 	/* BOs attached to this command buffer */
 	unsigned int nr_bos;
-	struct etnaviv_gem_object *bo[0];
+	struct etnaviv_vram_mapping *bo_map[0];
 };
 
 static inline void gpu_write(struct etnaviv_gpu *gpu, u32 reg, u32 data)
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_mmu.c b/drivers/gpu/drm/etnaviv/etnaviv_mmu.c
index 6743bc648dc8..29a723fabc17 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_mmu.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_mmu.c
@@ -193,7 +193,7 @@ int etnaviv_iommu_map_gem(struct etnaviv_iommu *mmu,
 
 	/*
 	 * Unmap the blocks which need to be reaped from the MMU.
-	 * Clear the mmu pointer to prevent the get_iova finding
+	 * Clear the mmu pointer to prevent the mapping_get finding
 	 * this mapping.
 	 */
 	list_for_each_entry_safe(m, n, &list, scan_node) {
diff --git a/drivers/gpu/drm/etnaviv/state_3d.xml.h b/drivers/gpu/drm/etnaviv/state_3d.xml.h
new file mode 100644
index 000000000000..d7146fd13943
--- /dev/null
+++ b/drivers/gpu/drm/etnaviv/state_3d.xml.h
@@ -0,0 +1,9 @@
+#ifndef STATE_3D_XML
+#define STATE_3D_XML
+
+/* This is a cut-down version of the state_3d.xml.h file */
+
+#define VIVS_TS_FLUSH_CACHE			0x00001650
+#define VIVS_TS_FLUSH_CACHE_FLUSH		0x00000001
+
+#endif /* STATE_3D_XML */