author		Christian König <christian.koenig@amd.com>	2013-08-13 05:56:54 -0400
committer	Alex Deucher <alexander.deucher@amd.com>	2013-08-30 16:30:42 -0400
commit		2483b4ea982efe8a544697d3f9642932e9af4dc1 (patch)
tree		f739e1b55b5e200817c174d4eae6f22935d152bf /drivers/gpu/drm/radeon/cik_sdma.c
parent		e409b128625732926c112cc9b709fb7bb1aa387f (diff)
drm/radeon: separate DMA code
Similar to separating the UVD code, just put the DMA
functions into separate files.
Signed-off-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
Diffstat (limited to 'drivers/gpu/drm/radeon/cik_sdma.c')
-rw-r--r--	drivers/gpu/drm/radeon/cik_sdma.c	785
1 file changed, 785 insertions, 0 deletions
diff --git a/drivers/gpu/drm/radeon/cik_sdma.c b/drivers/gpu/drm/radeon/cik_sdma.c
new file mode 100644
index 000000000000..8925185a0049
--- /dev/null
+++ b/drivers/gpu/drm/radeon/cik_sdma.c
@@ -0,0 +1,785 @@
/*
 * Copyright 2013 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Alex Deucher
 */
#include <linux/firmware.h>
#include <drm/drmP.h>
#include "radeon.h"
#include "radeon_asic.h"
#include "cikd.h"

/* sdma */
#define CIK_SDMA_UCODE_SIZE	1050
#define CIK_SDMA_UCODE_VERSION	64

u32 cik_gpu_check_soft_reset(struct radeon_device *rdev);

/*
 * sDMA - System DMA
 * Starting with CIK, the GPU has new asynchronous
 * DMA engines. These engines are used for compute
 * and gfx. There are two DMA engines (SDMA0, SDMA1)
 * and each one supports 1 ring buffer used for gfx
 * and 2 queues used for compute.
 *
 * The programming model is very similar to the CP
 * (ring buffer, IBs, etc.), but sDMA has its own
 * packet format that is different from the PM4 format
 * used by the CP. sDMA supports copying data, writing
 * embedded data, solid fills, and a number of other
 * things. It also has support for tiling/detiling of
 * buffers.
 */
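
/*
 * For reference, every packet emitted in this file starts with a header
 * DW built by the SDMA_PACKET() macro from cikd.h, which packs its
 * arguments roughly as:
 *
 *   SDMA_PACKET(op, sub_op, e) = ((e & 0xffff) << 16) |
 *                                ((sub_op & 0xff) << 8) |
 *                                 (op & 0xff)
 *
 * so the "extra_bits" values used below land in the packet-specific
 * bits [31:16] of the header DW.
 */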
52 | |||
53 | /** | ||
54 | * cik_sdma_ring_ib_execute - Schedule an IB on the DMA engine | ||
55 | * | ||
56 | * @rdev: radeon_device pointer | ||
57 | * @ib: IB object to schedule | ||
58 | * | ||
59 | * Schedule an IB in the DMA ring (CIK). | ||
60 | */ | ||
61 | void cik_sdma_ring_ib_execute(struct radeon_device *rdev, | ||
62 | struct radeon_ib *ib) | ||
63 | { | ||
64 | struct radeon_ring *ring = &rdev->ring[ib->ring]; | ||
65 | u32 extra_bits = (ib->vm ? ib->vm->id : 0) & 0xf; | ||
66 | |||
67 | if (rdev->wb.enabled) { | ||
68 | u32 next_rptr = ring->wptr + 5; | ||
69 | while ((next_rptr & 7) != 4) | ||
70 | next_rptr++; | ||
71 | next_rptr += 4; | ||
72 | radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_WRITE, SDMA_WRITE_SUB_OPCODE_LINEAR, 0)); | ||
73 | radeon_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc); | ||
74 | radeon_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr) & 0xffffffff); | ||
75 | radeon_ring_write(ring, 1); /* number of DWs to follow */ | ||
76 | radeon_ring_write(ring, next_rptr); | ||
77 | } | ||
78 | |||
        /* IB packet must end on an 8 DW boundary */
        while ((ring->wptr & 7) != 4)
                radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_NOP, 0, 0));
        radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_INDIRECT_BUFFER, 0, extra_bits));
        radeon_ring_write(ring, ib->gpu_addr & 0xffffffe0); /* base must be 32 byte aligned */
        radeon_ring_write(ring, upper_32_bits(ib->gpu_addr) & 0xffffffff);
        radeon_ring_write(ring, ib->length_dw);
}

/**
 * cik_sdma_fence_ring_emit - emit a fence on the DMA ring
 *
 * @rdev: radeon_device pointer
 * @fence: radeon fence object
 *
 * Add a DMA fence packet to the ring to write
 * the fence seq number and a DMA trap packet to generate
 * an interrupt if needed (CIK).
 */
void cik_sdma_fence_ring_emit(struct radeon_device *rdev,
                              struct radeon_fence *fence)
{
        struct radeon_ring *ring = &rdev->ring[fence->ring];
        u64 addr = rdev->fence_drv[fence->ring].gpu_addr;
        u32 extra_bits = (SDMA_POLL_REG_MEM_EXTRA_OP(1) |
                          SDMA_POLL_REG_MEM_EXTRA_FUNC(3)); /* == */
        u32 ref_and_mask;

        if (fence->ring == R600_RING_TYPE_DMA_INDEX)
                ref_and_mask = SDMA0;
        else
                ref_and_mask = SDMA1;

        /* write the fence */
        radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_FENCE, 0, 0));
        radeon_ring_write(ring, addr & 0xffffffff);
        radeon_ring_write(ring, upper_32_bits(addr) & 0xffffffff);
        radeon_ring_write(ring, fence->seq);
        /* generate an interrupt */
        radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_TRAP, 0, 0));
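        /*
         * A note on the flush below: POLL_REG_MEM with EXTRA_OP(1)
         * appears to be the hdp-flush variant of the packet, which asks
         * the engine to write ref_and_mask to GPU_HDP_FLUSH_REQ and then
         * poll GPU_HDP_FLUSH_DONE until the masked bits compare equal
         * (FUNC(3)), so the HDP flush has completed before the fence is
         * trusted by consumers.
         */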
        /* flush HDP */
        radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_POLL_REG_MEM, 0, extra_bits));
        radeon_ring_write(ring, GPU_HDP_FLUSH_DONE);
        radeon_ring_write(ring, GPU_HDP_FLUSH_REQ);
        radeon_ring_write(ring, ref_and_mask); /* REFERENCE */
        radeon_ring_write(ring, ref_and_mask); /* MASK */
        radeon_ring_write(ring, (4 << 16) | 10); /* RETRY_COUNT, POLL_INTERVAL */
}

/**
 * cik_sdma_semaphore_ring_emit - emit a semaphore on the dma ring
 *
 * @rdev: radeon_device pointer
 * @ring: radeon_ring structure holding ring information
 * @semaphore: radeon semaphore object
 * @emit_wait: wait or signal semaphore
 *
 * Add a DMA semaphore packet to the ring to wait on or signal
 * other rings (CIK).
 */
void cik_sdma_semaphore_ring_emit(struct radeon_device *rdev,
                                  struct radeon_ring *ring,
                                  struct radeon_semaphore *semaphore,
                                  bool emit_wait)
{
        u64 addr = semaphore->gpu_addr;
        u32 extra_bits = emit_wait ? 0 : SDMA_SEMAPHORE_EXTRA_S;

        radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SEMAPHORE, 0, extra_bits));
        radeon_ring_write(ring, addr & 0xfffffff8);
        radeon_ring_write(ring, upper_32_bits(addr) & 0xffffffff);
}

/**
 * cik_sdma_gfx_stop - stop the gfx async dma engines
 *
 * @rdev: radeon_device pointer
 *
 * Stop the gfx async dma ring buffers (CIK).
 */
static void cik_sdma_gfx_stop(struct radeon_device *rdev)
{
        u32 rb_cntl, reg_offset;
        int i;

        radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);

        for (i = 0; i < 2; i++) {
                if (i == 0)
                        reg_offset = SDMA0_REGISTER_OFFSET;
                else
                        reg_offset = SDMA1_REGISTER_OFFSET;
                rb_cntl = RREG32(SDMA0_GFX_RB_CNTL + reg_offset);
                rb_cntl &= ~SDMA_RB_ENABLE;
                WREG32(SDMA0_GFX_RB_CNTL + reg_offset, rb_cntl);
                WREG32(SDMA0_GFX_IB_CNTL + reg_offset, 0);
        }
}

/**
 * cik_sdma_rlc_stop - stop the compute async dma engines
 *
 * @rdev: radeon_device pointer
 *
 * Stop the compute async dma queues (CIK).
 */
static void cik_sdma_rlc_stop(struct radeon_device *rdev)
{
        /* XXX todo */
}

/**
 * cik_sdma_enable - halt or unhalt the async dma engines
 *
 * @rdev: radeon_device pointer
 * @enable: enable/disable the DMA MEs.
 *
 * Halt or unhalt the async dma engines (CIK).
 */
void cik_sdma_enable(struct radeon_device *rdev, bool enable)
{
        u32 me_cntl, reg_offset;
        int i;

        for (i = 0; i < 2; i++) {
                if (i == 0)
                        reg_offset = SDMA0_REGISTER_OFFSET;
                else
                        reg_offset = SDMA1_REGISTER_OFFSET;
                me_cntl = RREG32(SDMA0_ME_CNTL + reg_offset);
                if (enable)
                        me_cntl &= ~SDMA_HALT;
                else
                        me_cntl |= SDMA_HALT;
                WREG32(SDMA0_ME_CNTL + reg_offset, me_cntl);
        }
}

/**
 * cik_sdma_gfx_resume - setup and start the async dma engines
 *
 * @rdev: radeon_device pointer
 *
 * Set up the gfx DMA ring buffers and enable them (CIK).
 * Returns 0 for success, error for failure.
 */
static int cik_sdma_gfx_resume(struct radeon_device *rdev)
{
        struct radeon_ring *ring;
        u32 rb_cntl, ib_cntl;
        u32 rb_bufsz;
        u32 reg_offset, wb_offset;
        int i, r;

        for (i = 0; i < 2; i++) {
                if (i == 0) {
                        ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
                        reg_offset = SDMA0_REGISTER_OFFSET;
                        wb_offset = R600_WB_DMA_RPTR_OFFSET;
                } else {
                        ring = &rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX];
                        reg_offset = SDMA1_REGISTER_OFFSET;
                        wb_offset = CAYMAN_WB_DMA1_RPTR_OFFSET;
                }

                WREG32(SDMA0_SEM_INCOMPLETE_TIMER_CNTL + reg_offset, 0);
                WREG32(SDMA0_SEM_WAIT_FAIL_TIMER_CNTL + reg_offset, 0);

                /* Set ring buffer size in dwords */
                rb_bufsz = drm_order(ring->ring_size / 4);
                rb_cntl = rb_bufsz << 1;
#ifdef __BIG_ENDIAN
                rb_cntl |= SDMA_RB_SWAP_ENABLE | SDMA_RPTR_WRITEBACK_SWAP_ENABLE;
#endif
                WREG32(SDMA0_GFX_RB_CNTL + reg_offset, rb_cntl);

                /* Initialize the ring buffer's read and write pointers */
                WREG32(SDMA0_GFX_RB_RPTR + reg_offset, 0);
                WREG32(SDMA0_GFX_RB_WPTR + reg_offset, 0);

                /* set the wb address whether it's enabled or not */
                WREG32(SDMA0_GFX_RB_RPTR_ADDR_HI + reg_offset,
                       upper_32_bits(rdev->wb.gpu_addr + wb_offset) & 0xFFFFFFFF);
                WREG32(SDMA0_GFX_RB_RPTR_ADDR_LO + reg_offset,
                       ((rdev->wb.gpu_addr + wb_offset) & 0xFFFFFFFC));

                if (rdev->wb.enabled)
                        rb_cntl |= SDMA_RPTR_WRITEBACK_ENABLE;

                WREG32(SDMA0_GFX_RB_BASE + reg_offset, ring->gpu_addr >> 8);
                WREG32(SDMA0_GFX_RB_BASE_HI + reg_offset, ring->gpu_addr >> 40);

                ring->wptr = 0;
                WREG32(SDMA0_GFX_RB_WPTR + reg_offset, ring->wptr << 2);

                ring->rptr = RREG32(SDMA0_GFX_RB_RPTR + reg_offset) >> 2;

                /* enable DMA RB */
                WREG32(SDMA0_GFX_RB_CNTL + reg_offset, rb_cntl | SDMA_RB_ENABLE);

                ib_cntl = SDMA_IB_ENABLE;
#ifdef __BIG_ENDIAN
                ib_cntl |= SDMA_IB_SWAP_ENABLE;
#endif
                /* enable DMA IBs */
                WREG32(SDMA0_GFX_IB_CNTL + reg_offset, ib_cntl);

                ring->ready = true;

                r = radeon_ring_test(rdev, ring->idx, ring);
                if (r) {
                        ring->ready = false;
                        return r;
                }
        }

        radeon_ttm_set_active_vram_size(rdev, rdev->mc.real_vram_size);

        return 0;
}

/**
 * cik_sdma_rlc_resume - setup and start the async dma engines
 *
 * @rdev: radeon_device pointer
 *
 * Set up the compute DMA queues and enable them (CIK).
 * Returns 0 for success, error for failure.
 */
static int cik_sdma_rlc_resume(struct radeon_device *rdev)
{
        /* XXX todo */
        return 0;
}

/**
 * cik_sdma_load_microcode - load the sDMA ME ucode
 *
 * @rdev: radeon_device pointer
 *
 * Loads the sDMA0/1 ucode.
 * Returns 0 for success, -EINVAL if the ucode is not available.
 */
static int cik_sdma_load_microcode(struct radeon_device *rdev)
{
        const __be32 *fw_data;
        int i;

        if (!rdev->sdma_fw)
                return -EINVAL;

        /* stop the gfx rings and rlc compute queues */
        cik_sdma_gfx_stop(rdev);
        cik_sdma_rlc_stop(rdev);

        /* halt the MEs */
        cik_sdma_enable(rdev, false);

        /* sdma0 */
        fw_data = (const __be32 *)rdev->sdma_fw->data;
        WREG32(SDMA0_UCODE_ADDR + SDMA0_REGISTER_OFFSET, 0);
        for (i = 0; i < CIK_SDMA_UCODE_SIZE; i++)
                WREG32(SDMA0_UCODE_DATA + SDMA0_REGISTER_OFFSET, be32_to_cpup(fw_data++));
        WREG32(SDMA0_UCODE_DATA + SDMA0_REGISTER_OFFSET, CIK_SDMA_UCODE_VERSION);

        /* sdma1 */
        fw_data = (const __be32 *)rdev->sdma_fw->data;
        WREG32(SDMA0_UCODE_ADDR + SDMA1_REGISTER_OFFSET, 0);
        for (i = 0; i < CIK_SDMA_UCODE_SIZE; i++)
                WREG32(SDMA0_UCODE_DATA + SDMA1_REGISTER_OFFSET, be32_to_cpup(fw_data++));
        WREG32(SDMA0_UCODE_DATA + SDMA1_REGISTER_OFFSET, CIK_SDMA_UCODE_VERSION);

        WREG32(SDMA0_UCODE_ADDR + SDMA0_REGISTER_OFFSET, 0);
        WREG32(SDMA0_UCODE_ADDR + SDMA1_REGISTER_OFFSET, 0);
        return 0;
}

/**
 * cik_sdma_resume - setup and start the async dma engines
 *
 * @rdev: radeon_device pointer
 *
 * Set up the DMA engines and enable them (CIK).
 * Returns 0 for success, error for failure.
 */
int cik_sdma_resume(struct radeon_device *rdev)
{
        int r;

        /* Reset dma */
        WREG32(SRBM_SOFT_RESET, SOFT_RESET_SDMA | SOFT_RESET_SDMA1);
        RREG32(SRBM_SOFT_RESET);
        udelay(50);
        WREG32(SRBM_SOFT_RESET, 0);
        RREG32(SRBM_SOFT_RESET);

        r = cik_sdma_load_microcode(rdev);
        if (r)
                return r;

        /* unhalt the MEs */
        cik_sdma_enable(rdev, true);

        /* start the gfx rings and rlc compute queues */
        r = cik_sdma_gfx_resume(rdev);
        if (r)
                return r;
        r = cik_sdma_rlc_resume(rdev);
        if (r)
                return r;

        return 0;
}

/**
 * cik_sdma_fini - tear down the async dma engines
 *
 * @rdev: radeon_device pointer
 *
 * Stop the async dma engines and free the rings (CIK).
 */
void cik_sdma_fini(struct radeon_device *rdev)
{
        /* stop the gfx rings and rlc compute queues */
        cik_sdma_gfx_stop(rdev);
        cik_sdma_rlc_stop(rdev);
        /* halt the MEs */
        cik_sdma_enable(rdev, false);
        radeon_ring_fini(rdev, &rdev->ring[R600_RING_TYPE_DMA_INDEX]);
        radeon_ring_fini(rdev, &rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX]);
        /* XXX - compute dma queue tear down */
}

/**
 * cik_copy_dma - copy pages using the DMA engine
 *
 * @rdev: radeon_device pointer
 * @src_offset: src GPU address
 * @dst_offset: dst GPU address
 * @num_gpu_pages: number of GPU pages to xfer
 * @fence: radeon fence object
 *
 * Copy GPU pages using the DMA engine (CIK).
 * Used by the radeon ttm implementation to move pages if
 * registered as the asic copy callback.
 */
int cik_copy_dma(struct radeon_device *rdev,
                 uint64_t src_offset, uint64_t dst_offset,
                 unsigned num_gpu_pages,
                 struct radeon_fence **fence)
{
        struct radeon_semaphore *sem = NULL;
        int ring_index = rdev->asic->copy.dma_ring_index;
        struct radeon_ring *ring = &rdev->ring[ring_index];
        u32 size_in_bytes, cur_size_in_bytes;
        int i, num_loops;
        int r = 0;

        r = radeon_semaphore_create(rdev, &sem);
        if (r) {
                DRM_ERROR("radeon: moving bo (%d).\n", r);
                return r;
        }

        size_in_bytes = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT);
        num_loops = DIV_ROUND_UP(size_in_bytes, 0x1fffff);
        r = radeon_ring_lock(rdev, ring, num_loops * 7 + 14);
        if (r) {
                DRM_ERROR("radeon: moving bo (%d).\n", r);
                radeon_semaphore_free(rdev, &sem, NULL);
                return r;
        }

        if (radeon_fence_need_sync(*fence, ring->idx)) {
                radeon_semaphore_sync_rings(rdev, sem, (*fence)->ring,
                                            ring->idx);
                radeon_fence_note_sync(*fence, ring->idx);
        } else {
                radeon_semaphore_free(rdev, &sem, NULL);
        }

        for (i = 0; i < num_loops; i++) {
                cur_size_in_bytes = size_in_bytes;
                if (cur_size_in_bytes > 0x1fffff)
                        cur_size_in_bytes = 0x1fffff;
                size_in_bytes -= cur_size_in_bytes;
                radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_COPY, SDMA_COPY_SUB_OPCODE_LINEAR, 0));
                radeon_ring_write(ring, cur_size_in_bytes);
                radeon_ring_write(ring, 0); /* src/dst endian swap */
                radeon_ring_write(ring, src_offset & 0xffffffff);
                radeon_ring_write(ring, upper_32_bits(src_offset) & 0xffffffff);
                radeon_ring_write(ring, dst_offset & 0xfffffffc);
                radeon_ring_write(ring, upper_32_bits(dst_offset) & 0xffffffff);
                src_offset += cur_size_in_bytes;
                dst_offset += cur_size_in_bytes;
        }

        r = radeon_fence_emit(rdev, fence, ring->idx);
        if (r) {
                radeon_ring_unlock_undo(rdev, ring);
                return r;
        }

        radeon_ring_unlock_commit(rdev, ring);
        radeon_semaphore_free(rdev, &sem, *fence);

        return r;
}

/**
 * cik_sdma_ring_test - simple async dma engine test
 *
 * @rdev: radeon_device pointer
 * @ring: radeon_ring structure holding ring information
 *
 * Test the DMA engine by using it to write a
 * value to memory (CIK).
 * Returns 0 for success, error for failure.
 */
int cik_sdma_ring_test(struct radeon_device *rdev,
                       struct radeon_ring *ring)
{
        unsigned i;
        int r;
        void __iomem *ptr = (void *)rdev->vram_scratch.ptr;
        u32 tmp;

        if (!ptr) {
                DRM_ERROR("invalid vram scratch pointer\n");
                return -EINVAL;
        }

        tmp = 0xCAFEDEAD;
        writel(tmp, ptr);

        r = radeon_ring_lock(rdev, ring, 5);
        if (r) {
                DRM_ERROR("radeon: dma failed to lock ring %d (%d).\n", ring->idx, r);
                return r;
        }
        radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_WRITE, SDMA_WRITE_SUB_OPCODE_LINEAR, 0));
        radeon_ring_write(ring, rdev->vram_scratch.gpu_addr & 0xfffffffc);
        radeon_ring_write(ring, upper_32_bits(rdev->vram_scratch.gpu_addr) & 0xffffffff);
        radeon_ring_write(ring, 1); /* number of DWs to follow */
        radeon_ring_write(ring, 0xDEADBEEF);
        radeon_ring_unlock_commit(rdev, ring);

        for (i = 0; i < rdev->usec_timeout; i++) {
                tmp = readl(ptr);
                if (tmp == 0xDEADBEEF)
                        break;
                DRM_UDELAY(1);
        }

        if (i < rdev->usec_timeout) {
                DRM_INFO("ring test on %d succeeded in %d usecs\n", ring->idx, i);
        } else {
                DRM_ERROR("radeon: ring %d test failed (0x%08X)\n",
                          ring->idx, tmp);
                r = -EINVAL;
        }
        return r;
}

/**
 * cik_sdma_ib_test - test an IB on the DMA engine
 *
 * @rdev: radeon_device pointer
 * @ring: radeon_ring structure holding ring information
 *
 * Test a simple IB in the DMA ring (CIK).
 * Returns 0 on success, error on failure.
 */
int cik_sdma_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
{
        struct radeon_ib ib;
        unsigned i;
        int r;
        void __iomem *ptr = (void *)rdev->vram_scratch.ptr;
        u32 tmp = 0;

        if (!ptr) {
                DRM_ERROR("invalid vram scratch pointer\n");
                return -EINVAL;
        }

        tmp = 0xCAFEDEAD;
        writel(tmp, ptr);

        r = radeon_ib_get(rdev, ring->idx, &ib, NULL, 256);
        if (r) {
                DRM_ERROR("radeon: failed to get ib (%d).\n", r);
                return r;
        }

        ib.ptr[0] = SDMA_PACKET(SDMA_OPCODE_WRITE, SDMA_WRITE_SUB_OPCODE_LINEAR, 0);
        ib.ptr[1] = rdev->vram_scratch.gpu_addr & 0xfffffffc;
        ib.ptr[2] = upper_32_bits(rdev->vram_scratch.gpu_addr) & 0xffffffff;
        ib.ptr[3] = 1;
        ib.ptr[4] = 0xDEADBEEF;
        ib.length_dw = 5;

        r = radeon_ib_schedule(rdev, &ib, NULL);
        if (r) {
                radeon_ib_free(rdev, &ib);
                DRM_ERROR("radeon: failed to schedule ib (%d).\n", r);
                return r;
        }
        r = radeon_fence_wait(ib.fence, false);
        if (r) {
                DRM_ERROR("radeon: fence wait failed (%d).\n", r);
                return r;
        }
        for (i = 0; i < rdev->usec_timeout; i++) {
                tmp = readl(ptr);
                if (tmp == 0xDEADBEEF)
                        break;
                DRM_UDELAY(1);
        }
        if (i < rdev->usec_timeout) {
                DRM_INFO("ib test on ring %d succeeded in %u usecs\n", ib.fence->ring, i);
        } else {
                DRM_ERROR("radeon: ib test failed (0x%08X)\n", tmp);
                r = -EINVAL;
        }
        radeon_ib_free(rdev, &ib);
        return r;
}

/**
 * cik_sdma_is_lockup - Check if the DMA engine is locked up
 *
 * @rdev: radeon_device pointer
 * @ring: radeon_ring structure holding ring information
 *
 * Check if the async DMA engine is locked up (CIK).
 * Returns true if the engine appears to be locked up, false if not.
 */
bool cik_sdma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
{
        u32 reset_mask = cik_gpu_check_soft_reset(rdev);
        u32 mask;

        if (ring->idx == R600_RING_TYPE_DMA_INDEX)
                mask = RADEON_RESET_DMA;
        else
                mask = RADEON_RESET_DMA1;

        if (!(reset_mask & mask)) {
                radeon_ring_lockup_update(ring);
                return false;
        }
        /* force ring activities */
        radeon_ring_force_activity(rdev, ring);
        return radeon_ring_test_lockup(rdev, ring);
}

/**
 * cik_sdma_vm_set_page - update the page tables using sDMA
 *
 * @rdev: radeon_device pointer
 * @ib: indirect buffer to fill with commands
 * @pe: addr of the page entry
 * @addr: dst addr to write into pe
 * @count: number of page entries to update
 * @incr: increase next addr by incr bytes
 * @flags: access flags
 *
 * Update the page tables using sDMA (CIK).
 */
void cik_sdma_vm_set_page(struct radeon_device *rdev,
                          struct radeon_ib *ib,
                          uint64_t pe,
                          uint64_t addr, unsigned count,
                          uint32_t incr, uint32_t flags)
{
        uint32_t r600_flags = cayman_vm_page_flags(rdev, flags);
        uint64_t value;
        unsigned ndw;

        if (flags & RADEON_VM_PAGE_SYSTEM) {
                while (count) {
                        ndw = count * 2;
                        if (ndw > 0xFFFFE)
                                ndw = 0xFFFFE;

                        /* for non-physically contiguous pages (system) */
                        ib->ptr[ib->length_dw++] = SDMA_PACKET(SDMA_OPCODE_WRITE, SDMA_WRITE_SUB_OPCODE_LINEAR, 0);
                        ib->ptr[ib->length_dw++] = pe;
                        ib->ptr[ib->length_dw++] = upper_32_bits(pe);
                        ib->ptr[ib->length_dw++] = ndw;
                        for (; ndw > 0; ndw -= 2, --count, pe += 8) {
                                if (flags & RADEON_VM_PAGE_SYSTEM) {
                                        value = radeon_vm_map_gart(rdev, addr);
                                        value &= 0xFFFFFFFFFFFFF000ULL;
                                } else if (flags & RADEON_VM_PAGE_VALID) {
                                        value = addr;
                                } else {
                                        value = 0;
                                }
                                addr += incr;
                                value |= r600_flags;
                                ib->ptr[ib->length_dw++] = value;
                                ib->ptr[ib->length_dw++] = upper_32_bits(value);
                        }
                }
        } else {
                while (count) {
                        ndw = count;
                        if (ndw > 0x7FFFF)
                                ndw = 0x7FFFF;

                        if (flags & RADEON_VM_PAGE_VALID)
                                value = addr;
                        else
                                value = 0;
                        /* for physically contiguous pages (vram) */
                        ib->ptr[ib->length_dw++] = SDMA_PACKET(SDMA_OPCODE_GENERATE_PTE_PDE, 0, 0);
                        ib->ptr[ib->length_dw++] = pe; /* dst addr */
                        ib->ptr[ib->length_dw++] = upper_32_bits(pe);
                        ib->ptr[ib->length_dw++] = r600_flags; /* mask */
                        ib->ptr[ib->length_dw++] = 0;
                        ib->ptr[ib->length_dw++] = value; /* value */
                        ib->ptr[ib->length_dw++] = upper_32_bits(value);
                        ib->ptr[ib->length_dw++] = incr; /* increment size */
                        ib->ptr[ib->length_dw++] = 0;
                        ib->ptr[ib->length_dw++] = ndw; /* number of entries */
                        pe += ndw * 8;
                        addr += ndw * incr;
                        count -= ndw;
                }
        }
        while (ib->length_dw & 0x7)
                ib->ptr[ib->length_dw++] = SDMA_PACKET(SDMA_OPCODE_NOP, 0, 0);
}

/**
 * cik_dma_vm_flush - cik vm flush using sDMA
 *
 * @rdev: radeon_device pointer
 * @ridx: ring index to emit the flush on
 * @vm: radeon_vm pointer, may be NULL
 *
 * Update the page table base and flush the VM TLB
 * using sDMA (CIK).
 */
void cik_dma_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm)
{
        struct radeon_ring *ring = &rdev->ring[ridx];
        u32 extra_bits = (SDMA_POLL_REG_MEM_EXTRA_OP(1) |
                          SDMA_POLL_REG_MEM_EXTRA_FUNC(3)); /* == */
        u32 ref_and_mask;

        if (vm == NULL)
                return;

        if (ridx == R600_RING_TYPE_DMA_INDEX)
                ref_and_mask = SDMA0;
        else
                ref_and_mask = SDMA1;

        radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
        if (vm->id < 8) {
                radeon_ring_write(ring, (VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (vm->id << 2)) >> 2);
        } else {
                radeon_ring_write(ring, (VM_CONTEXT8_PAGE_TABLE_BASE_ADDR + ((vm->id - 8) << 2)) >> 2);
        }
        radeon_ring_write(ring, vm->pd_gpu_addr >> 12);

        /* update SH_MEM_* regs */
        radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
        radeon_ring_write(ring, SRBM_GFX_CNTL >> 2);
        radeon_ring_write(ring, VMID(vm->id));

        radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
        radeon_ring_write(ring, SH_MEM_BASES >> 2);
        radeon_ring_write(ring, 0);

        radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
        radeon_ring_write(ring, SH_MEM_CONFIG >> 2);
        radeon_ring_write(ring, 0);

        radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
        radeon_ring_write(ring, SH_MEM_APE1_BASE >> 2);
        radeon_ring_write(ring, 1);

        radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
        radeon_ring_write(ring, SH_MEM_APE1_LIMIT >> 2);
        radeon_ring_write(ring, 0);

        radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
        radeon_ring_write(ring, SRBM_GFX_CNTL >> 2);
        radeon_ring_write(ring, VMID(0));

        /* flush HDP */
        radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_POLL_REG_MEM, 0, extra_bits));
        radeon_ring_write(ring, GPU_HDP_FLUSH_DONE);
        radeon_ring_write(ring, GPU_HDP_FLUSH_REQ);
        radeon_ring_write(ring, ref_and_mask); /* REFERENCE */
        radeon_ring_write(ring, ref_and_mask); /* MASK */
        radeon_ring_write(ring, (4 << 16) | 10); /* RETRY_COUNT, POLL_INTERVAL */

        /* flush TLB */
        radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
        radeon_ring_write(ring, VM_INVALIDATE_REQUEST >> 2);
        radeon_ring_write(ring, 1 << vm->id);
}