aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/gpu/drm/radeon/evergreen.c
diff options
context:
space:
mode:
authorAlex Deucher <alexdeucher@gmail.com>2010-01-12 17:54:34 -0500
committerDave Airlie <airlied@redhat.com>2010-02-08 18:44:02 -0500
commitbcc1c2a1d22974215e39dc87ce746ba9a39223e5 (patch)
tree62ae9dfab266202240307fc3998806c1d4655552 /drivers/gpu/drm/radeon/evergreen.c
parente97bd974448ce90f8e4720499d84580bcd6a2f7a (diff)
drm/radeon/kms: add initial Evergreen support (Radeon HD 5xxx)
This adds initial Evergreen KMS support, it doesn't include any acceleration features or interrupt handling yet. Major changes are DCE4 handling for PLLs for the > 2 crtcs. Signed-off-by: Alex Deucher <alexdeucher@gmail.com> Signed-off-by: Dave Airlie <airlied@redhat.com>
Diffstat (limited to 'drivers/gpu/drm/radeon/evergreen.c')
-rw-r--r--drivers/gpu/drm/radeon/evergreen.c794
1 file changed, 794 insertions, 0 deletions
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
new file mode 100644
index 000000000000..c2f9752e4ee0
--- /dev/null
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -0,0 +1,794 @@
1/*
2 * Copyright 2010 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Alex Deucher
23 */
24#include <linux/firmware.h>
25#include <linux/platform_device.h>
26#include "drmP.h"
27#include "radeon.h"
28#include "radeon_drm.h"
29#include "rv770d.h"
30#include "atom.h"
31#include "avivod.h"
32#include "evergreen_reg.h"
33
34static void evergreen_gpu_init(struct radeon_device *rdev);
35void evergreen_fini(struct radeon_device *rdev);
36
37bool evergreen_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd)
38{
39 bool connected = false;
40 /* XXX */
41 return connected;
42}
43
/*
 * evergreen_hpd_set_polarity - configure the HPD interrupt polarity for a pad.
 *
 * Stub: Evergreen HPD handling is not implemented yet.
 */
void evergreen_hpd_set_polarity(struct radeon_device *rdev,
				enum radeon_hpd_id hpd)
{
	/* XXX */
}
49
/*
 * evergreen_hpd_init - set up hot-plug detect pads and interrupts.
 *
 * Stub: Evergreen HPD handling is not implemented yet.
 */
void evergreen_hpd_init(struct radeon_device *rdev)
{
	/* XXX */
}
54
55
/*
 * evergreen_bandwidth_update - program display watermarks for memory bandwidth.
 *
 * Stub: Evergreen display watermark programming is not implemented yet.
 */
void evergreen_bandwidth_update(struct radeon_device *rdev)
{
	/* XXX */
}
60
/*
 * evergreen_hpd_fini - tear down hot-plug detect state.
 *
 * Stub: Evergreen HPD handling is not implemented yet.
 */
void evergreen_hpd_fini(struct radeon_device *rdev)
{
	/* XXX */
}
65
66static int evergreen_mc_wait_for_idle(struct radeon_device *rdev)
67{
68 unsigned i;
69 u32 tmp;
70
71 for (i = 0; i < rdev->usec_timeout; i++) {
72 /* read MC_STATUS */
73 tmp = RREG32(SRBM_STATUS) & 0x1F00;
74 if (!tmp)
75 return 0;
76 udelay(1);
77 }
78 return -1;
79}
80
81/*
82 * GART
83 */
/*
 * evergreen_pcie_gart_enable - bring up the PCIE GART page table.
 *
 * Pins the page-table BO in VRAM, programs the VM L2 cache and the L1
 * TLB control registers, points VM context 0 at the GTT range and
 * enables it, then flushes the TLB.
 *
 * Returns 0 on success, -EINVAL if the page-table BO was never
 * allocated, or the error from radeon_gart_table_vram_pin().
 */
int evergreen_pcie_gart_enable(struct radeon_device *rdev)
{
	u32 tmp;
	int r, i;

	if (rdev->gart.table.vram.robj == NULL) {
		dev_err(rdev->dev, "No VRAM object for PCIE GART.\n");
		return -EINVAL;
	}
	r = radeon_gart_table_vram_pin(rdev);
	if (r)
		return r;
	/* Setup L2 cache */
	WREG32(VM_L2_CNTL, ENABLE_L2_CACHE | ENABLE_L2_FRAGMENT_PROCESSING |
				ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
				EFFECTIVE_L2_QUEUE_SIZE(7));
	WREG32(VM_L2_CNTL2, 0);
	WREG32(VM_L2_CNTL3, BANK_SELECT(0) | CACHE_UPDATE_MODE(2));
	/* Setup TLB control: same setting mirrored to every L1 TLB unit */
	tmp = ENABLE_L1_TLB | ENABLE_L1_FRAGMENT_PROCESSING |
		SYSTEM_ACCESS_MODE_NOT_IN_SYS |
		SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU |
		EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5);
	WREG32(MC_VM_MD_L1_TLB0_CNTL, tmp);
	WREG32(MC_VM_MD_L1_TLB1_CNTL, tmp);
	WREG32(MC_VM_MD_L1_TLB2_CNTL, tmp);
	WREG32(MC_VM_MB_L1_TLB0_CNTL, tmp);
	WREG32(MC_VM_MB_L1_TLB1_CNTL, tmp);
	WREG32(MC_VM_MB_L1_TLB2_CNTL, tmp);
	WREG32(MC_VM_MB_L1_TLB3_CNTL, tmp);
	/* Point context 0 at the GTT range and the page table (4KB units) */
	WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR, rdev->mc.gtt_start >> 12);
	WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR, rdev->mc.gtt_end >> 12);
	WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR, rdev->gart.table_addr >> 12);
	WREG32(VM_CONTEXT0_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(0) |
				RANGE_PROTECTION_FAULT_ENABLE_DEFAULT);
	/* Out-of-range accesses fault to the dummy page */
	WREG32(VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR,
			(u32)(rdev->dummy_page.addr >> 12));
	/* Disable the remaining VM contexts; only context 0 is used.
	 * NOTE(review): the loop covers i = 1..6 (contexts 1-6) — confirm
	 * against the register spec whether a 7th extra context exists. */
	for (i = 1; i < 7; i++)
		WREG32(VM_CONTEXT0_CNTL + (i * 4), 0);

	r600_pcie_gart_tlb_flush(rdev);
	rdev->gart.ready = true;
	return 0;
}
128
/*
 * evergreen_pcie_gart_disable - shut down the PCIE GART.
 *
 * Disables every VM context, drops the L1 TLB / L2 cache enable bits
 * (leaving only sizing fields), then unmaps and unpins the page-table BO
 * if it exists.  Safe to call even if the GART was never enabled.
 */
void evergreen_pcie_gart_disable(struct radeon_device *rdev)
{
	u32 tmp;
	int i, r;

	/* Disable all tables */
	for (i = 0; i < 7; i++)
		WREG32(VM_CONTEXT0_CNTL + (i * 4), 0);

	/* Setup L2 cache: caching disabled, sizing retained */
	WREG32(VM_L2_CNTL, ENABLE_L2_FRAGMENT_PROCESSING |
				EFFECTIVE_L2_QUEUE_SIZE(7));
	WREG32(VM_L2_CNTL2, 0);
	WREG32(VM_L2_CNTL3, BANK_SELECT(0) | CACHE_UPDATE_MODE(2));
	/* Setup TLB control: no ENABLE_L1_TLB, so the TLBs are off */
	tmp = EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5);
	WREG32(MC_VM_MD_L1_TLB0_CNTL, tmp);
	WREG32(MC_VM_MD_L1_TLB1_CNTL, tmp);
	WREG32(MC_VM_MD_L1_TLB2_CNTL, tmp);
	WREG32(MC_VM_MB_L1_TLB0_CNTL, tmp);
	WREG32(MC_VM_MB_L1_TLB1_CNTL, tmp);
	WREG32(MC_VM_MB_L1_TLB2_CNTL, tmp);
	WREG32(MC_VM_MB_L1_TLB3_CNTL, tmp);
	/* Unmap and unpin the page-table BO (best effort on reserve failure) */
	if (rdev->gart.table.vram.robj) {
		r = radeon_bo_reserve(rdev->gart.table.vram.robj, false);
		if (likely(r == 0)) {
			radeon_bo_kunmap(rdev->gart.table.vram.robj);
			radeon_bo_unpin(rdev->gart.table.vram.robj);
			radeon_bo_unreserve(rdev->gart.table.vram.robj);
		}
	}
}
161
/*
 * evergreen_pcie_gart_fini - full GART teardown at driver unload.
 *
 * Disables the GART hardware, then frees the page-table BO and the
 * software GART state, in that order.
 */
void evergreen_pcie_gart_fini(struct radeon_device *rdev)
{
	evergreen_pcie_gart_disable(rdev);
	radeon_gart_table_vram_free(rdev);
	radeon_gart_fini(rdev);
}
168
169
/*
 * evergreen_agp_enable - configure the VM for AGP (no page table).
 *
 * Programs the same L2/L1 TLB setup as the PCIE GART path, but disables
 * every VM context so GPU accesses pass through the system aperture
 * untranslated, as AGP requires.
 */
void evergreen_agp_enable(struct radeon_device *rdev)
{
	u32 tmp;
	int i;

	/* Setup L2 cache */
	WREG32(VM_L2_CNTL, ENABLE_L2_CACHE | ENABLE_L2_FRAGMENT_PROCESSING |
				ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
				EFFECTIVE_L2_QUEUE_SIZE(7));
	WREG32(VM_L2_CNTL2, 0);
	WREG32(VM_L2_CNTL3, BANK_SELECT(0) | CACHE_UPDATE_MODE(2));
	/* Setup TLB control: same setting mirrored to every L1 TLB unit */
	tmp = ENABLE_L1_TLB | ENABLE_L1_FRAGMENT_PROCESSING |
		SYSTEM_ACCESS_MODE_NOT_IN_SYS |
		SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU |
		EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5);
	WREG32(MC_VM_MD_L1_TLB0_CNTL, tmp);
	WREG32(MC_VM_MD_L1_TLB1_CNTL, tmp);
	WREG32(MC_VM_MD_L1_TLB2_CNTL, tmp);
	WREG32(MC_VM_MB_L1_TLB0_CNTL, tmp);
	WREG32(MC_VM_MB_L1_TLB1_CNTL, tmp);
	WREG32(MC_VM_MB_L1_TLB2_CNTL, tmp);
	WREG32(MC_VM_MB_L1_TLB3_CNTL, tmp);
	/* No page-table translation under AGP: all contexts disabled */
	for (i = 0; i < 7; i++)
		WREG32(VM_CONTEXT0_CNTL + (i * 4), 0);
}
196
197static void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *save)
198{
199 save->vga_control[0] = RREG32(D1VGA_CONTROL);
200 save->vga_control[1] = RREG32(D2VGA_CONTROL);
201 save->vga_control[2] = RREG32(EVERGREEN_D3VGA_CONTROL);
202 save->vga_control[3] = RREG32(EVERGREEN_D4VGA_CONTROL);
203 save->vga_control[4] = RREG32(EVERGREEN_D5VGA_CONTROL);
204 save->vga_control[5] = RREG32(EVERGREEN_D6VGA_CONTROL);
205 save->vga_render_control = RREG32(VGA_RENDER_CONTROL);
206 save->vga_hdp_control = RREG32(VGA_HDP_CONTROL);
207 save->crtc_control[0] = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET);
208 save->crtc_control[1] = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET);
209 save->crtc_control[2] = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET);
210 save->crtc_control[3] = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET);
211 save->crtc_control[4] = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET);
212 save->crtc_control[5] = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET);
213
214 /* Stop all video */
215 WREG32(VGA_RENDER_CONTROL, 0);
216 WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC0_REGISTER_OFFSET, 1);
217 WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC1_REGISTER_OFFSET, 1);
218 WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC2_REGISTER_OFFSET, 1);
219 WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC3_REGISTER_OFFSET, 1);
220 WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC4_REGISTER_OFFSET, 1);
221 WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC5_REGISTER_OFFSET, 1);
222 WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, 0);
223 WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, 0);
224 WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, 0);
225 WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, 0);
226 WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, 0);
227 WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, 0);
228 WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC0_REGISTER_OFFSET, 0);
229 WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC1_REGISTER_OFFSET, 0);
230 WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC2_REGISTER_OFFSET, 0);
231 WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC3_REGISTER_OFFSET, 0);
232 WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC4_REGISTER_OFFSET, 0);
233 WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC5_REGISTER_OFFSET, 0);
234
235 WREG32(D1VGA_CONTROL, 0);
236 WREG32(D2VGA_CONTROL, 0);
237 WREG32(EVERGREEN_D3VGA_CONTROL, 0);
238 WREG32(EVERGREEN_D4VGA_CONTROL, 0);
239 WREG32(EVERGREEN_D5VGA_CONTROL, 0);
240 WREG32(EVERGREEN_D6VGA_CONTROL, 0);
241}
242
243static void evergreen_mc_resume(struct radeon_device *rdev, struct evergreen_mc_save *save)
244{
245 WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC0_REGISTER_OFFSET,
246 upper_32_bits(rdev->mc.vram_start));
247 WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC0_REGISTER_OFFSET,
248 upper_32_bits(rdev->mc.vram_start));
249 WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + EVERGREEN_CRTC0_REGISTER_OFFSET,
250 (u32)rdev->mc.vram_start);
251 WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + EVERGREEN_CRTC0_REGISTER_OFFSET,
252 (u32)rdev->mc.vram_start);
253
254 WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC1_REGISTER_OFFSET,
255 upper_32_bits(rdev->mc.vram_start));
256 WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC1_REGISTER_OFFSET,
257 upper_32_bits(rdev->mc.vram_start));
258 WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + EVERGREEN_CRTC1_REGISTER_OFFSET,
259 (u32)rdev->mc.vram_start);
260 WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + EVERGREEN_CRTC1_REGISTER_OFFSET,
261 (u32)rdev->mc.vram_start);
262
263 WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC2_REGISTER_OFFSET,
264 upper_32_bits(rdev->mc.vram_start));
265 WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC2_REGISTER_OFFSET,
266 upper_32_bits(rdev->mc.vram_start));
267 WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + EVERGREEN_CRTC2_REGISTER_OFFSET,
268 (u32)rdev->mc.vram_start);
269 WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + EVERGREEN_CRTC2_REGISTER_OFFSET,
270 (u32)rdev->mc.vram_start);
271
272 WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC3_REGISTER_OFFSET,
273 upper_32_bits(rdev->mc.vram_start));
274 WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC3_REGISTER_OFFSET,
275 upper_32_bits(rdev->mc.vram_start));
276 WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + EVERGREEN_CRTC3_REGISTER_OFFSET,
277 (u32)rdev->mc.vram_start);
278 WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + EVERGREEN_CRTC3_REGISTER_OFFSET,
279 (u32)rdev->mc.vram_start);
280
281 WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC4_REGISTER_OFFSET,
282 upper_32_bits(rdev->mc.vram_start));
283 WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC4_REGISTER_OFFSET,
284 upper_32_bits(rdev->mc.vram_start));
285 WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + EVERGREEN_CRTC4_REGISTER_OFFSET,
286 (u32)rdev->mc.vram_start);
287 WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + EVERGREEN_CRTC4_REGISTER_OFFSET,
288 (u32)rdev->mc.vram_start);
289
290 WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC5_REGISTER_OFFSET,
291 upper_32_bits(rdev->mc.vram_start));
292 WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC5_REGISTER_OFFSET,
293 upper_32_bits(rdev->mc.vram_start));
294 WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + EVERGREEN_CRTC5_REGISTER_OFFSET,
295 (u32)rdev->mc.vram_start);
296 WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + EVERGREEN_CRTC5_REGISTER_OFFSET,
297 (u32)rdev->mc.vram_start);
298
299 WREG32(EVERGREEN_VGA_MEMORY_BASE_ADDRESS_HIGH, upper_32_bits(rdev->mc.vram_start));
300 WREG32(EVERGREEN_VGA_MEMORY_BASE_ADDRESS, (u32)rdev->mc.vram_start);
301 /* Unlock host access */
302 WREG32(VGA_HDP_CONTROL, save->vga_hdp_control);
303 mdelay(1);
304 /* Restore video state */
305 WREG32(D1VGA_CONTROL, save->vga_control[0]);
306 WREG32(D2VGA_CONTROL, save->vga_control[1]);
307 WREG32(EVERGREEN_D3VGA_CONTROL, save->vga_control[2]);
308 WREG32(EVERGREEN_D4VGA_CONTROL, save->vga_control[3]);
309 WREG32(EVERGREEN_D5VGA_CONTROL, save->vga_control[4]);
310 WREG32(EVERGREEN_D6VGA_CONTROL, save->vga_control[5]);
311 WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC0_REGISTER_OFFSET, 1);
312 WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC1_REGISTER_OFFSET, 1);
313 WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC2_REGISTER_OFFSET, 1);
314 WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC3_REGISTER_OFFSET, 1);
315 WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC4_REGISTER_OFFSET, 1);
316 WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC5_REGISTER_OFFSET, 1);
317 WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, save->crtc_control[0]);
318 WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, save->crtc_control[1]);
319 WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, save->crtc_control[2]);
320 WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, save->crtc_control[3]);
321 WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, save->crtc_control[4]);
322 WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, save->crtc_control[5]);
323 WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC0_REGISTER_OFFSET, 0);
324 WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC1_REGISTER_OFFSET, 0);
325 WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC2_REGISTER_OFFSET, 0);
326 WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC3_REGISTER_OFFSET, 0);
327 WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC4_REGISTER_OFFSET, 0);
328 WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC5_REGISTER_OFFSET, 0);
329 WREG32(VGA_RENDER_CONTROL, save->vga_render_control);
330}
331
/*
 * evergreen_mc_program - program the memory controller address ranges.
 *
 * Blanks the displays (evergreen_mc_stop), waits for the MC to idle,
 * then programs the system aperture, framebuffer location, HDP
 * non-surface range and AGP aperture before restoring display state.
 * Finally disables the VGA renderer so it cannot scribble over driver
 * objects in VRAM.
 */
static void evergreen_mc_program(struct radeon_device *rdev)
{
	struct evergreen_mc_save save;
	u32 tmp;
	int i, j;

	/* Initialize HDP: zero 32 register groups of 5 regs, 0x18 apart.
	 * NOTE(review): raw offsets 0x2c14..0x2c24 are undocumented here —
	 * presumably the HDP tiling/surface config block; confirm against
	 * the register spec. */
	for (i = 0, j = 0; i < 32; i++, j += 0x18) {
		WREG32((0x2c14 + j), 0x00000000);
		WREG32((0x2c18 + j), 0x00000000);
		WREG32((0x2c1c + j), 0x00000000);
		WREG32((0x2c20 + j), 0x00000000);
		WREG32((0x2c24 + j), 0x00000000);
	}
	WREG32(HDP_REG_COHERENCY_FLUSH_CNTL, 0);

	evergreen_mc_stop(rdev, &save);
	if (evergreen_mc_wait_for_idle(rdev)) {
		dev_warn(rdev->dev, "Wait for MC idle timedout !\n");
	}
	/* Lockout access through VGA aperture*/
	WREG32(VGA_HDP_CONTROL, VGA_MEMORY_DISABLE);
	/* Update configuration: the system aperture must span both VRAM
	 * and the AGP/GTT range, whichever order they sit in */
	if (rdev->flags & RADEON_IS_AGP) {
		if (rdev->mc.vram_start < rdev->mc.gtt_start) {
			/* VRAM before AGP */
			WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
				rdev->mc.vram_start >> 12);
			WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
				rdev->mc.gtt_end >> 12);
		} else {
			/* VRAM after AGP */
			WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
				rdev->mc.gtt_start >> 12);
			WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
				rdev->mc.vram_end >> 12);
		}
	} else {
		WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
			rdev->mc.vram_start >> 12);
		WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
			rdev->mc.vram_end >> 12);
	}
	WREG32(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR, 0);
	/* FB location: start in low 16 bits, end in high 16, 16MB units */
	tmp = ((rdev->mc.vram_end >> 24) & 0xFFFF) << 16;
	tmp |= ((rdev->mc.vram_start >> 24) & 0xFFFF);
	WREG32(MC_VM_FB_LOCATION, tmp);
	WREG32(HDP_NONSURFACE_BASE, (rdev->mc.vram_start >> 8));
	WREG32(HDP_NONSURFACE_INFO, (2 << 7));
	WREG32(HDP_NONSURFACE_SIZE, (rdev->mc.mc_vram_size - 1) | 0x3FF);
	if (rdev->flags & RADEON_IS_AGP) {
		WREG32(MC_VM_AGP_TOP, rdev->mc.gtt_end >> 16);
		WREG32(MC_VM_AGP_BOT, rdev->mc.gtt_start >> 16);
		WREG32(MC_VM_AGP_BASE, rdev->mc.agp_base >> 22);
	} else {
		/* BOT > TOP marks the AGP aperture as empty */
		WREG32(MC_VM_AGP_BASE, 0);
		WREG32(MC_VM_AGP_TOP, 0x0FFFFFFF);
		WREG32(MC_VM_AGP_BOT, 0x0FFFFFFF);
	}
	if (evergreen_mc_wait_for_idle(rdev)) {
		dev_warn(rdev->dev, "Wait for MC idle timedout !\n");
	}
	evergreen_mc_resume(rdev, &save);
	/* we need to own VRAM, so turn off the VGA renderer here
	 * to stop it overwriting our objects */
	rv515_vga_render_disable(rdev);
}
399
400#if 0
401/*
402 * CP.
403 */
/* Stub (currently compiled out): halt the command processor.  No CP
 * support exists for Evergreen yet. */
static void evergreen_cp_stop(struct radeon_device *rdev)
{
	/* XXX */
}
408
409
/* Stub (currently compiled out): upload CP microcode.  Pretends to
 * succeed until real acceleration support lands. */
static int evergreen_cp_load_microcode(struct radeon_device *rdev)
{
	/* XXX */

	return 0;
}
416
417
418/*
419 * Core functions
420 */
/* Stub (currently compiled out): compute the tile-pipe to render-backend
 * mapping used for tiling setup.  Always returns 0 for now. */
static u32 evergreen_get_tile_pipe_to_backend_map(u32 num_tile_pipes,
						  u32 num_backends,
						  u32 backend_disable_mask)
{
	u32 backend_map = 0;

	return backend_map;
}
429#endif
430
/*
 * evergreen_gpu_init - configure the 3D engine (pipes, backends, tiling).
 *
 * Stub: Evergreen GPU setup is not implemented yet.
 */
static void evergreen_gpu_init(struct radeon_device *rdev)
{
	/* XXX */
}
435
436int evergreen_mc_init(struct radeon_device *rdev)
437{
438 fixed20_12 a;
439 u32 tmp;
440 int chansize, numchan;
441 int r;
442
443 /* Get VRAM informations */
444 rdev->mc.vram_is_ddr = true;
445 tmp = RREG32(MC_ARB_RAMCFG);
446 if (tmp & CHANSIZE_OVERRIDE) {
447 chansize = 16;
448 } else if (tmp & CHANSIZE_MASK) {
449 chansize = 64;
450 } else {
451 chansize = 32;
452 }
453 tmp = RREG32(MC_SHARED_CHMAP);
454 switch ((tmp & NOOFCHAN_MASK) >> NOOFCHAN_SHIFT) {
455 case 0:
456 default:
457 numchan = 1;
458 break;
459 case 1:
460 numchan = 2;
461 break;
462 case 2:
463 numchan = 4;
464 break;
465 case 3:
466 numchan = 8;
467 break;
468 }
469 rdev->mc.vram_width = numchan * chansize;
470 /* Could aper size report 0 ? */
471 rdev->mc.aper_base = drm_get_resource_start(rdev->ddev, 0);
472 rdev->mc.aper_size = drm_get_resource_len(rdev->ddev, 0);
473 /* Setup GPU memory space */
474 /* size in MB on evergreen */
475 rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE) * 1024 * 1024;
476 rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE) * 1024 * 1024;
477
478 if (rdev->mc.mc_vram_size > rdev->mc.aper_size)
479 rdev->mc.mc_vram_size = rdev->mc.aper_size;
480
481 if (rdev->mc.real_vram_size > rdev->mc.aper_size)
482 rdev->mc.real_vram_size = rdev->mc.aper_size;
483
484 if (rdev->flags & RADEON_IS_AGP) {
485 r = radeon_agp_init(rdev);
486 if (r)
487 return r;
488 /* gtt_size is setup by radeon_agp_init */
489 rdev->mc.gtt_location = rdev->mc.agp_base;
490 tmp = 0xFFFFFFFFUL - rdev->mc.agp_base - rdev->mc.gtt_size;
491 /* Try to put vram before or after AGP because we
492 * we want SYSTEM_APERTURE to cover both VRAM and
493 * AGP so that GPU can catch out of VRAM/AGP access
494 */
495 if (rdev->mc.gtt_location > rdev->mc.mc_vram_size) {
496 /* Enought place before */
497 rdev->mc.vram_location = rdev->mc.gtt_location -
498 rdev->mc.mc_vram_size;
499 } else if (tmp > rdev->mc.mc_vram_size) {
500 /* Enought place after */
501 rdev->mc.vram_location = rdev->mc.gtt_location +
502 rdev->mc.gtt_size;
503 } else {
504 /* Try to setup VRAM then AGP might not
505 * not work on some card
506 */
507 rdev->mc.vram_location = 0x00000000UL;
508 rdev->mc.gtt_location = rdev->mc.mc_vram_size;
509 }
510 } else {
511 rdev->mc.vram_location = 0x00000000UL;
512 rdev->mc.gtt_location = rdev->mc.mc_vram_size;
513 rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024;
514 }
515 rdev->mc.vram_start = rdev->mc.vram_location;
516 rdev->mc.vram_end = rdev->mc.vram_location + rdev->mc.mc_vram_size - 1;
517 rdev->mc.gtt_start = rdev->mc.gtt_location;
518 rdev->mc.gtt_end = rdev->mc.gtt_location + rdev->mc.gtt_size - 1;
519 /* FIXME: we should enforce default clock in case GPU is not in
520 * default setup
521 */
522 a.full = rfixed_const(100);
523 rdev->pm.sclk.full = rfixed_const(rdev->clock.default_sclk);
524 rdev->pm.sclk.full = rfixed_div(rdev->pm.sclk, a);
525 return 0;
526}
/*
 * evergreen_gpu_reset - reset a hung GPU.
 *
 * Stub: always reports success; no reset sequence exists for Evergreen yet.
 */
int evergreen_gpu_reset(struct radeon_device *rdev)
{
	/* FIXME: implement for evergreen */
	return 0;
}
532
/*
 * evergreen_startup - run the hardware bring-up sequence.
 *
 * Currently only programs the memory controller and calls the (stub)
 * GPU init.  Firmware load, GART/AGP enable, blitter, IRQ and CP setup
 * are kept behind "#if 0" until acceleration support lands.
 *
 * Returns 0 on success or a negative error code.
 */
static int evergreen_startup(struct radeon_device *rdev)
{
#if 0
	int r;

	if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) {
		r = r600_init_microcode(rdev);
		if (r) {
			DRM_ERROR("Failed to load firmware!\n");
			return r;
		}
	}
#endif
	evergreen_mc_program(rdev);
#if 0
	if (rdev->flags & RADEON_IS_AGP) {
		/* Fixed typo: was "evergreem_agp_enable", which would fail
		 * to compile once this block is enabled. */
		evergreen_agp_enable(rdev);
	} else {
		r = evergreen_pcie_gart_enable(rdev);
		if (r)
			return r;
	}
#endif
	evergreen_gpu_init(rdev);
#if 0
	if (!rdev->r600_blit.shader_obj) {
		r = r600_blit_init(rdev);
		if (r) {
			DRM_ERROR("radeon: failed blitter (%d).\n", r);
			return r;
		}
	}

	/* Pin the blit shader object in VRAM so the GPU can fetch it */
	r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
	if (unlikely(r != 0))
		return r;
	r = radeon_bo_pin(rdev->r600_blit.shader_obj, RADEON_GEM_DOMAIN_VRAM,
			&rdev->r600_blit.shader_gpu_addr);
	radeon_bo_unreserve(rdev->r600_blit.shader_obj);
	if (r) {
		DRM_ERROR("failed to pin blit object %d\n", r);
		return r;
	}

	/* Enable IRQ */
	r = r600_irq_init(rdev);
	if (r) {
		DRM_ERROR("radeon: IH init failed (%d).\n", r);
		radeon_irq_kms_fini(rdev);
		return r;
	}
	r600_irq_set(rdev);

	r = radeon_ring_init(rdev, rdev->cp.ring_size);
	if (r)
		return r;
	r = evergreen_cp_load_microcode(rdev);
	if (r)
		return r;
	r = r600_cp_resume(rdev);
	if (r)
		return r;
	/* write back buffer are not vital so don't worry about failure */
	r600_wb_enable(rdev);
#endif
	return 0;
}
600
601int evergreen_resume(struct radeon_device *rdev)
602{
603 int r;
604
605 /* Do not reset GPU before posting, on rv770 hw unlike on r500 hw,
606 * posting will perform necessary task to bring back GPU into good
607 * shape.
608 */
609 /* post card */
610 atom_asic_init(rdev->mode_info.atom_context);
611 /* Initialize clocks */
612 r = radeon_clocks_init(rdev);
613 if (r) {
614 return r;
615 }
616
617 r = evergreen_startup(rdev);
618 if (r) {
619 DRM_ERROR("r600 startup failed on resume\n");
620 return r;
621 }
622#if 0
623 r = r600_ib_test(rdev);
624 if (r) {
625 DRM_ERROR("radeon: failled testing IB (%d).\n", r);
626 return r;
627 }
628#endif
629 return r;
630
631}
632
/*
 * evergreen_suspend - quiesce the GPU before system suspend.
 *
 * The real teardown (CP stop, writeback disable, GART disable, blit
 * shader unpin) is stubbed out behind "#if 0" until acceleration support
 * lands, so this currently does nothing and reports success.
 */
int evergreen_suspend(struct radeon_device *rdev)
{
#if 0
	int r;

	/* FIXME: we should wait for ring to be empty */
	r700_cp_stop(rdev);
	rdev->cp.ready = false;
	r600_wb_disable(rdev);
	evergreen_pcie_gart_disable(rdev);
	/* unpin shaders bo */
	r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
	if (likely(r == 0)) {
		radeon_bo_unpin(rdev->r600_blit.shader_obj);
		radeon_bo_unreserve(rdev->r600_blit.shader_obj);
	}
#endif
	return 0;
}
652
653static bool evergreen_card_posted(struct radeon_device *rdev)
654{
655 u32 reg;
656
657 /* first check CRTCs */
658 reg = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET) |
659 RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET) |
660 RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET) |
661 RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET) |
662 RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET) |
663 RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET);
664 if (reg & EVERGREEN_CRTC_MASTER_EN)
665 return true;
666
667 /* then check MEM_SIZE, in case the crtcs are off */
668 if (RREG32(CONFIG_MEMSIZE))
669 return true;
670
671 return false;
672}
673
/* Plan is to move initialization in that function and use
 * helper function so that radeon_device_init pretty much
 * do nothing more than calling asic specific function. This
 * should also allow to remove a bunch of callback function
 * like vram_info.
 */
/*
 * evergreen_init - one-time driver initialization for an Evergreen GPU.
 *
 * Sets up the dummy page, GEM, BIOS/AtomBIOS, posts the card if needed,
 * initializes scratch/surface registers, clocks, power management, the
 * fence driver, the memory controller layout and the buffer manager,
 * then attempts startup.  Acceleration (IRQ, rings, GART) is still
 * stubbed out, so accel_working stays false.  Returns 0 on success or a
 * negative error code; partially-initialized state is left for
 * evergreen_fini() to clean up.
 */
int evergreen_init(struct radeon_device *rdev)
{
	int r;

	r = radeon_dummy_page_init(rdev);
	if (r)
		return r;
	/* This don't do much */
	r = radeon_gem_init(rdev);
	if (r)
		return r;
	/* Read BIOS */
	if (!radeon_get_bios(rdev)) {
		if (ASIC_IS_AVIVO(rdev))
			return -EINVAL;
	}
	/* Must be an ATOMBIOS */
	if (!rdev->is_atom_bios) {
		/* NOTE(review): message says "R600" although this is the
		 * evergreen path — text inherited from r600.c; confirm and
		 * update if desired. */
		dev_err(rdev->dev, "Expecting atombios for R600 GPU\n");
		return -EINVAL;
	}
	r = radeon_atombios_init(rdev);
	if (r)
		return r;
	/* Post card if necessary */
	if (!evergreen_card_posted(rdev)) {
		if (!rdev->bios) {
			dev_err(rdev->dev, "Card not posted and no BIOS - ignoring\n");
			return -EINVAL;
		}
		DRM_INFO("GPU not posted. posting now...\n");
		atom_asic_init(rdev->mode_info.atom_context);
	}
	/* Initialize scratch registers */
	r600_scratch_init(rdev);
	/* Initialize surface registers */
	radeon_surface_init(rdev);
	/* Initialize clocks */
	radeon_get_clock_info(rdev->ddev);
	r = radeon_clocks_init(rdev);
	if (r)
		return r;
	/* Initialize power management */
	radeon_pm_init(rdev);
	/* Fence driver */
	r = radeon_fence_driver_init(rdev);
	if (r)
		return r;
	r = evergreen_mc_init(rdev);
	if (r)
		return r;
	/* Memory manager */
	r = radeon_bo_init(rdev);
	if (r)
		return r;
#if 0
	r = radeon_irq_kms_init(rdev);
	if (r)
		return r;

	rdev->cp.ring_obj = NULL;
	r600_ring_init(rdev, 1024 * 1024);

	rdev->ih.ring_obj = NULL;
	r600_ih_ring_init(rdev, 64 * 1024);

	r = r600_pcie_gart_init(rdev);
	if (r)
		return r;
#endif
	rdev->accel_working = false;
	r = evergreen_startup(rdev);
	if (r) {
		/* startup failure is non-fatal: fall back to no acceleration */
		evergreen_suspend(rdev);
		/*r600_wb_fini(rdev);*/
		/*radeon_ring_fini(rdev);*/
		/*evergreen_pcie_gart_fini(rdev);*/
		rdev->accel_working = false;
	}
	/* accel_working is always false above, so the IB pool/test path
	 * is effectively disabled until acceleration support lands */
	if (rdev->accel_working) {
		r = radeon_ib_pool_init(rdev);
		if (r) {
			DRM_ERROR("radeon: failed initializing IB pool (%d).\n", r);
			rdev->accel_working = false;
		}
		r = r600_ib_test(rdev);
		if (r) {
			DRM_ERROR("radeon: failed testing IB (%d).\n", r);
			rdev->accel_working = false;
		}
	}
	return 0;
}
773
/*
 * evergreen_fini - tear down everything evergreen_init() set up.
 *
 * Called at driver unload.  Acceleration-related teardown is behind
 * "#if 0" to mirror the stubbed startup path; the rest is released in
 * reverse order of initialization, including the BIOS copy and the
 * dummy page.
 */
void evergreen_fini(struct radeon_device *rdev)
{
	evergreen_suspend(rdev);
#if 0
	r600_blit_fini(rdev);
	r600_irq_fini(rdev);
	radeon_irq_kms_fini(rdev);
	radeon_ring_fini(rdev);
	r600_wb_fini(rdev);
	evergreen_pcie_gart_fini(rdev);
#endif
	radeon_gem_fini(rdev);
	radeon_fence_driver_fini(rdev);
	radeon_clocks_fini(rdev);
	radeon_agp_fini(rdev);
	radeon_bo_fini(rdev);
	radeon_atombios_fini(rdev);
	kfree(rdev->bios);
	rdev->bios = NULL;
	radeon_dummy_page_fini(rdev);
}