author    Christian König <christian.koenig@amd.com>  2013-08-13 05:56:54 -0400
committer Alex Deucher <alexander.deucher@amd.com>    2013-08-30 16:30:42 -0400
commit    2483b4ea982efe8a544697d3f9642932e9af4dc1 (patch)
tree      f739e1b55b5e200817c174d4eae6f22935d152bf /drivers/gpu/drm/radeon/evergreen_dma.c
parent    e409b128625732926c112cc9b709fb7bb1aa387f (diff)
drm/radeon: separate DMA code
Similar to separating the UVD code, just put the DMA
functions into separate files.
Signed-off-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
Diffstat (limited to 'drivers/gpu/drm/radeon/evergreen_dma.c')
-rw-r--r--  drivers/gpu/drm/radeon/evergreen_dma.c  190
1 file changed, 190 insertions, 0 deletions
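
For context before the diff: this patch moves code rather than changing behavior, so the relocated functions are still reached through the radeon asic callback table. A minimal sketch of that wiring, with field names assumed from the radeon_asic.c of this era (illustrative, not part of this patch):

        /* Illustrative sketch of how the moved DMA functions stay wired up
         * through the asic table (field names assumed; not part of this
         * patch). */
        static struct radeon_asic evergreen_asic = {
                /* ... */
                .copy = {
                        .dma = &evergreen_copy_dma,
                        .dma_ring_index = R600_RING_TYPE_DMA_INDEX,
                        /* ... */
                },
                /* ... */
        };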
diff --git a/drivers/gpu/drm/radeon/evergreen_dma.c b/drivers/gpu/drm/radeon/evergreen_dma.c
new file mode 100644
index 000000000000..6a0656d00ed0
--- /dev/null
+++ b/drivers/gpu/drm/radeon/evergreen_dma.c
@@ -0,0 +1,190 @@
/*
 * Copyright 2010 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Alex Deucher
 */
#include <drm/drmP.h>
#include "radeon.h"
#include "radeon_asic.h"
#include "evergreend.h"

u32 evergreen_gpu_check_soft_reset(struct radeon_device *rdev);

/**
 * evergreen_dma_fence_ring_emit - emit a fence on the DMA ring
 *
 * @rdev: radeon_device pointer
 * @fence: radeon fence object
 *
 * Add a DMA fence packet to the ring to write the fence seq number,
 * and a DMA trap packet to generate an interrupt if needed
 * (evergreen-SI).
 */
void evergreen_dma_fence_ring_emit(struct radeon_device *rdev,
                                   struct radeon_fence *fence)
{
        struct radeon_ring *ring = &rdev->ring[fence->ring];
        u64 addr = rdev->fence_drv[fence->ring].gpu_addr;

        /* write the fence */
        radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_FENCE, 0, 0));
        radeon_ring_write(ring, addr & 0xfffffffc);
        radeon_ring_write(ring, (upper_32_bits(addr) & 0xff));
        radeon_ring_write(ring, fence->seq);
        /* generate an interrupt */
        radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_TRAP, 0, 0));
        /* flush HDP */
        radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0));
        radeon_ring_write(ring, (0xf << 16) | (HDP_MEM_COHERENCY_FLUSH_CNTL >> 2));
        radeon_ring_write(ring, 1);
}
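
A note on the header writes above: DMA_PACKET comes from evergreend.h and packs the opcode, sub-command and dword count into a single header dword. Roughly its shape, with field widths assumed from that header (a sketch to double-check against the tree, not a quotation of it):

        /* Approximate evergreen DMA packet header encoding (assumed from
         * evergreend.h).  The 20-bit count field is also why
         * evergreen_copy_dma below caps each copy packet at 0xFFFFF dwords. */
        #define DMA_PACKET(cmd, sub_cmd, n) ((((cmd) & 0xF) << 28) |      \
                                             (((sub_cmd) & 0xFF) << 20) | \
                                             (((n) & 0xFFFFF) << 0))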

/**
 * evergreen_dma_ring_ib_execute - schedule an IB on the DMA engine
 *
 * @rdev: radeon_device pointer
 * @ib: IB object to schedule
 *
 * Schedule an IB in the DMA ring (evergreen).
 */
void evergreen_dma_ring_ib_execute(struct radeon_device *rdev,
                                   struct radeon_ib *ib)
{
        struct radeon_ring *ring = &rdev->ring[ib->ring];

        if (rdev->wb.enabled) {
                u32 next_rptr = ring->wptr + 4;
                while ((next_rptr & 7) != 5)
                        next_rptr++;
                next_rptr += 3;
                radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_WRITE, 0, 1));
                radeon_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc);
                radeon_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr) & 0xff);
                radeon_ring_write(ring, next_rptr);
        }

        /* The indirect buffer packet must end on an 8 DW boundary in the
         * DMA ring.  Pad as necessary with NOPs.
         */
        while ((ring->wptr & 7) != 5)
                radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_NOP, 0, 0));
        radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_INDIRECT_BUFFER, 0, 0));
        radeon_ring_write(ring, (ib->gpu_addr & 0xFFFFFFE0));
        radeon_ring_write(ring, (ib->length_dw << 12) | (upper_32_bits(ib->gpu_addr) & 0xFF));
}
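
The padding rule is easier to see with a worked example:

        /* The INDIRECT_BUFFER packet is 3 dwords (header, address low,
         * size | address high) and must end on an 8-dword boundary, so it
         * has to start at wptr % 8 == 5.  For example, with wptr == 2,
         * three NOPs land at offsets 2, 3 and 4 and the packet fills
         * 5, 6 and 7.  The next_rptr math in the writeback block mirrors
         * this: wptr + 4 for the WRITE packet, aligned up to 5 mod 8 for
         * the padding, plus 3 for the IB packet itself. */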

/**
 * evergreen_copy_dma - copy pages using the DMA engine
 *
 * @rdev: radeon_device pointer
 * @src_offset: src GPU address
 * @dst_offset: dst GPU address
 * @num_gpu_pages: number of GPU pages to xfer
 * @fence: radeon fence object
 *
 * Copy GPU pages using the DMA engine (evergreen-cayman).
 * Used by the radeon ttm implementation to move pages if
 * registered as the asic copy callback.
 */
int evergreen_copy_dma(struct radeon_device *rdev,
                       uint64_t src_offset, uint64_t dst_offset,
                       unsigned num_gpu_pages,
                       struct radeon_fence **fence)
{
        struct radeon_semaphore *sem = NULL;
        int ring_index = rdev->asic->copy.dma_ring_index;
        struct radeon_ring *ring = &rdev->ring[ring_index];
        u32 size_in_dw, cur_size_in_dw;
        int i, num_loops;
        int r = 0;

        r = radeon_semaphore_create(rdev, &sem);
        if (r) {
                DRM_ERROR("radeon: moving bo (%d).\n", r);
                return r;
        }

        size_in_dw = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT) / 4;
        num_loops = DIV_ROUND_UP(size_in_dw, 0xfffff);
        r = radeon_ring_lock(rdev, ring, num_loops * 5 + 11);
        if (r) {
                DRM_ERROR("radeon: moving bo (%d).\n", r);
                radeon_semaphore_free(rdev, &sem, NULL);
                return r;
        }

        if (radeon_fence_need_sync(*fence, ring->idx)) {
                radeon_semaphore_sync_rings(rdev, sem, (*fence)->ring,
                                            ring->idx);
                radeon_fence_note_sync(*fence, ring->idx);
        } else {
                radeon_semaphore_free(rdev, &sem, NULL);
        }

        for (i = 0; i < num_loops; i++) {
                cur_size_in_dw = size_in_dw;
                if (cur_size_in_dw > 0xFFFFF)
                        cur_size_in_dw = 0xFFFFF;
                size_in_dw -= cur_size_in_dw;
                radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_COPY, 0, cur_size_in_dw));
                radeon_ring_write(ring, dst_offset & 0xfffffffc);
                radeon_ring_write(ring, src_offset & 0xfffffffc);
                radeon_ring_write(ring, upper_32_bits(dst_offset) & 0xff);
                radeon_ring_write(ring, upper_32_bits(src_offset) & 0xff);
                src_offset += cur_size_in_dw * 4;
                dst_offset += cur_size_in_dw * 4;
        }

        r = radeon_fence_emit(rdev, fence, ring->idx);
        if (r) {
                radeon_ring_unlock_undo(rdev, ring);
                return r;
        }

        radeon_ring_unlock_commit(rdev, ring);
        radeon_semaphore_free(rdev, &sem, *fence);

        return r;
}
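
For reference, a hypothetical caller, roughly how the ttm move path drives the copy callback this function backs (sketch only; src_gpu_addr, dst_gpu_addr and size are placeholders, not identifiers from this patch):

        /* Hypothetical usage sketch (not part of this patch). */
        struct radeon_fence *fence = NULL;
        unsigned num_pages = DIV_ROUND_UP(size, RADEON_GPU_PAGE_SIZE);
        int r = evergreen_copy_dma(rdev, src_gpu_addr, dst_gpu_addr,
                                   num_pages, &fence);
        if (!r)
                r = radeon_fence_wait(fence, false);  /* block until the copy lands */
        radeon_fence_unref(&fence);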

/**
 * evergreen_dma_is_lockup - Check if the DMA engine is locked up
 *
 * @rdev: radeon_device pointer
 * @ring: radeon_ring structure holding ring information
 *
 * Check if the async DMA engine is locked up.
 * Returns true if the engine appears to be locked up, false if not.
 */
bool evergreen_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
{
        u32 reset_mask = evergreen_gpu_check_soft_reset(rdev);

        if (!(reset_mask & RADEON_RESET_DMA)) {
                radeon_ring_lockup_update(ring);
                return false;
        }
        /* force ring activities */
        radeon_ring_force_activity(rdev, ring);
        return radeon_ring_test_lockup(rdev, ring);
}
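
One closing annotation on the check above:

        /* Annotation (not part of the patch): when the soft-reset status
         * does not implicate the DMA engine, radeon_ring_lockup_update()
         * refreshes the ring's last-activity bookkeeping so a healthy
         * engine is not reported as hung later.  Otherwise a NOP is forced
         * onto the ring so radeon_ring_test_lockup() can distinguish a
         * stalled rptr from a merely idle ring. */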