path: root/drivers/gpu/drm/radeon/rs600.c
author		Jerome Glisse <jglisse@redhat.com>	2009-09-30 16:09:06 -0400
committer	Dave Airlie <airlied@redhat.com>	2009-10-01 19:25:50 -0400
commit		c010f8000a925e08d84d9391e13dd297b9fdc393 (patch)
tree		fb1c4bb4c417fe91f40b1d7697e8a11f27bddcea /drivers/gpu/drm/radeon/rs600.c
parent		3bc6853593bd4fba357dc252b3cf60cd86a1d2ec (diff)
drm/radeon/kms: Convert RS600 to new init path
The new init path allows us to simplify ASIC initialization and makes it easier to trace what happens on each ASIC; most of the callbacks are removed. This also does a massive RS600 register cleanup to clarify the RS600 registers; we are still a bit fuzzy on some registers and are waiting for more information. I don't have the hardware to test, so this patch is a best effort not to break anything and to try to improve things.
Signed-off-by: Jerome Glisse <jglisse@redhat.com>
Signed-off-by: Dave Airlie <airlied@redhat.com>
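For orientation, the new init path added at the bottom of this diff replaces the per-ASIC callback scheme with explicit entry points. Below is a rough, hedged sketch of the order in which those entry points run; rs600_init_path_overview() is a hypothetical helper written only to show the sequence, and it uses only function names that appear in the diff (error handling and the core-driver glue that normally invokes these hooks are omitted):

/* Hypothetical overview only; the real implementations are in the diff below. */
static int rs600_init_path_overview(struct radeon_device *rdev)
{
	int r;

	r = rs600_init(rdev);		/* probe: BIOS, clocks, MC, GART init, then rs600_startup() */
	if (r)
		return r;
	r = rs600_suspend(rdev);	/* suspend: stop CP, WB, IRQ, GART */
	if (r)
		return r;
	return rs600_resume(rdev);	/* resume: reset, ATOM re-post, clocks, rs600_startup() again */
}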
Diffstat (limited to 'drivers/gpu/drm/radeon/rs600.c')
-rw-r--r--	drivers/gpu/drm/radeon/rs600.c	466
1 file changed, 269 insertions(+), 197 deletions(-)
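Throughout the diff, flat RS600_* defines are replaced by per-register helper macros from the new rs600d.h header, following the radeon convention of S_* (set a field), G_* (get/extract a field), C_* (clear mask for a field), and V_* (named field values). rs600d.h itself is not part of this file's diff; the snippet below is only an illustrative sketch of that convention for the ENABLE_PT bit of MC_PT0_CNTL, with the bit offset and width assumed for the example, so the register writes in the hunks below are easier to read.

/* Illustrative sketch of the rs600d.h macro convention (not the actual
 * header contents; bit offset/width here are assumed for the example). */
#define R_000100_MC_PT0_CNTL		0x000100
#define   S_000100_ENABLE_PT(x)		(((x) & 0x1) << 0)	/* set field */
#define   G_000100_ENABLE_PT(x)		(((x) >> 0) & 0x1)	/* get field */
#define   C_000100_ENABLE_PT		0xFFFFFFFE		/* clear mask */

/* Typical read-modify-write with these helpers, matching the pattern
 * used in rs600_gart_enable() below (hypothetical helper): */
static void example_set_enable_pt(struct radeon_device *rdev)
{
	u32 tmp = RREG32_MC(R_000100_MC_PT0_CNTL);

	WREG32_MC(R_000100_MC_PT0_CNTL, tmp | S_000100_ENABLE_PT(1));
}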
diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c
index fa8e451c64e8..9e4fdc173557 100644
--- a/drivers/gpu/drm/radeon/rs600.c
+++ b/drivers/gpu/drm/radeon/rs600.c
@@ -25,27 +25,26 @@
  * Alex Deucher
  * Jerome Glisse
  */
+/* RS600 / Radeon X1250/X1270 integrated GPU
+ *
+ * This file gather function specific to RS600 which is the IGP of
+ * the X1250/X1270 family supporting intel CPU (while RS690/RS740
+ * is the X1250/X1270 supporting AMD CPU). The display engine are
+ * the avivo one, bios is an atombios, 3D block are the one of the
+ * R4XX family. The GART is different from the RS400 one and is very
+ * close to the one of the R600 family (R600 likely being an evolution
+ * of the RS600 GART block).
+ */
 #include "drmP.h"
-#include "radeon_reg.h"
 #include "radeon.h"
+#include "atom.h"
+#include "rs600d.h"
 
 #include "rs600_reg_safe.h"
 
-/* rs600 depends on : */
-void r100_hdp_reset(struct radeon_device *rdev);
-int r100_gui_wait_for_idle(struct radeon_device *rdev);
-int r300_mc_wait_for_idle(struct radeon_device *rdev);
-void r420_pipes_init(struct radeon_device *rdev);
-
-/* This files gather functions specifics to :
- * rs600
- *
- * Some of these functions might be used by newer ASICs.
- */
 void rs600_gpu_init(struct radeon_device *rdev);
 int rs600_mc_wait_for_idle(struct radeon_device *rdev);
 
-
 /*
  * GART.
  */
@@ -53,18 +52,18 @@ void rs600_gart_tlb_flush(struct radeon_device *rdev)
 {
 	uint32_t tmp;
 
-	tmp = RREG32_MC(RS600_MC_PT0_CNTL);
-	tmp &= ~(RS600_INVALIDATE_ALL_L1_TLBS | RS600_INVALIDATE_L2_CACHE);
-	WREG32_MC(RS600_MC_PT0_CNTL, tmp);
+	tmp = RREG32_MC(R_000100_MC_PT0_CNTL);
+	tmp &= C_000100_INVALIDATE_ALL_L1_TLBS & C_000100_INVALIDATE_L2_CACHE;
+	WREG32_MC(R_000100_MC_PT0_CNTL, tmp);
 
-	tmp = RREG32_MC(RS600_MC_PT0_CNTL);
-	tmp |= RS600_INVALIDATE_ALL_L1_TLBS | RS600_INVALIDATE_L2_CACHE;
-	WREG32_MC(RS600_MC_PT0_CNTL, tmp);
+	tmp = RREG32_MC(R_000100_MC_PT0_CNTL);
+	tmp |= S_000100_INVALIDATE_ALL_L1_TLBS(1) & S_000100_INVALIDATE_L2_CACHE(1);
+	WREG32_MC(R_000100_MC_PT0_CNTL, tmp);
 
-	tmp = RREG32_MC(RS600_MC_PT0_CNTL);
-	tmp &= ~(RS600_INVALIDATE_ALL_L1_TLBS | RS600_INVALIDATE_L2_CACHE);
-	WREG32_MC(RS600_MC_PT0_CNTL, tmp);
-	tmp = RREG32_MC(RS600_MC_PT0_CNTL);
+	tmp = RREG32_MC(R_000100_MC_PT0_CNTL);
+	tmp &= C_000100_INVALIDATE_ALL_L1_TLBS & C_000100_INVALIDATE_L2_CACHE;
+	WREG32_MC(R_000100_MC_PT0_CNTL, tmp);
+	tmp = RREG32_MC(R_000100_MC_PT0_CNTL);
 }
 
 int rs600_gart_init(struct radeon_device *rdev)
@@ -86,7 +85,7 @@ int rs600_gart_init(struct radeon_device *rdev)
 
 int rs600_gart_enable(struct radeon_device *rdev)
 {
-	uint32_t tmp;
+	u32 tmp;
 	int r, i;
 
 	if (rdev->gart.table.vram.robj == NULL) {
@@ -96,46 +95,50 @@ int rs600_gart_enable(struct radeon_device *rdev)
 	r = radeon_gart_table_vram_pin(rdev);
 	if (r)
 		return r;
+	/* Enable bus master */
+	tmp = RREG32(R_00004C_BUS_CNTL) & C_00004C_BUS_MASTER_DIS;
+	WREG32(R_00004C_BUS_CNTL, tmp);
 	/* FIXME: setup default page */
-	WREG32_MC(RS600_MC_PT0_CNTL,
-		 (RS600_EFFECTIVE_L2_CACHE_SIZE(6) |
-		  RS600_EFFECTIVE_L2_QUEUE_SIZE(6)));
+	WREG32_MC(R_000100_MC_PT0_CNTL,
+		 (S_000100_EFFECTIVE_L2_CACHE_SIZE(6) |
+		  S_000100_EFFECTIVE_L2_QUEUE_SIZE(6)));
 	for (i = 0; i < 19; i++) {
-		WREG32_MC(RS600_MC_PT0_CLIENT0_CNTL + i,
-			 (RS600_ENABLE_TRANSLATION_MODE_OVERRIDE |
-			  RS600_SYSTEM_ACCESS_MODE_IN_SYS |
-			  RS600_SYSTEM_APERTURE_UNMAPPED_ACCESS_DEFAULT_PAGE |
-			  RS600_EFFECTIVE_L1_CACHE_SIZE(3) |
-			  RS600_ENABLE_FRAGMENT_PROCESSING |
-			  RS600_EFFECTIVE_L1_QUEUE_SIZE(3)));
+		WREG32_MC(R_00016C_MC_PT0_CLIENT0_CNTL + i,
+			  S_00016C_ENABLE_TRANSLATION_MODE_OVERRIDE(1) |
+			  S_00016C_SYSTEM_ACCESS_MODE_MASK(
+				V_00016C_SYSTEM_ACCESS_MODE_IN_SYS) |
+			  S_00016C_SYSTEM_APERTURE_UNMAPPED_ACCESS(
+				V_00016C_SYSTEM_APERTURE_UNMAPPED_DEFAULT_PAGE) |
+			  S_00016C_EFFECTIVE_L1_CACHE_SIZE(1) |
+			  S_00016C_ENABLE_FRAGMENT_PROCESSING(1) |
+			  S_00016C_EFFECTIVE_L1_QUEUE_SIZE(1));
 	}
 
 	/* System context map to GART space */
-	WREG32_MC(RS600_MC_PT0_SYSTEM_APERTURE_LOW_ADDR, rdev->mc.gtt_location);
-	tmp = rdev->mc.gtt_location + rdev->mc.gtt_size - 1;
-	WREG32_MC(RS600_MC_PT0_SYSTEM_APERTURE_HIGH_ADDR, tmp);
+	WREG32_MC(R_000112_MC_PT0_SYSTEM_APERTURE_LOW_ADDR, rdev->mc.gtt_start);
+	WREG32_MC(R_000114_MC_PT0_SYSTEM_APERTURE_HIGH_ADDR, rdev->mc.gtt_end);
 
 	/* enable first context */
-	WREG32_MC(RS600_MC_PT0_CONTEXT0_FLAT_START_ADDR, rdev->mc.gtt_location);
-	tmp = rdev->mc.gtt_location + rdev->mc.gtt_size - 1;
-	WREG32_MC(RS600_MC_PT0_CONTEXT0_FLAT_END_ADDR, tmp);
-	WREG32_MC(RS600_MC_PT0_CONTEXT0_CNTL,
-		 (RS600_ENABLE_PAGE_TABLE | RS600_PAGE_TABLE_TYPE_FLAT));
+	WREG32_MC(R_00013C_MC_PT0_CONTEXT0_FLAT_START_ADDR, rdev->mc.gtt_start);
+	WREG32_MC(R_00014C_MC_PT0_CONTEXT0_FLAT_END_ADDR, rdev->mc.gtt_end);
+	WREG32_MC(R_000102_MC_PT0_CONTEXT0_CNTL,
+		  S_000102_ENABLE_PAGE_TABLE(1) |
+		  S_000102_PAGE_TABLE_DEPTH(V_000102_PAGE_TABLE_FLAT));
 	/* disable all other contexts */
 	for (i = 1; i < 8; i++) {
-		WREG32_MC(RS600_MC_PT0_CONTEXT0_CNTL + i, 0);
+		WREG32_MC(R_000102_MC_PT0_CONTEXT0_CNTL + i, 0);
 	}
 
 	/* setup the page table */
-	WREG32_MC(RS600_MC_PT0_CONTEXT0_FLAT_BASE_ADDR,
+	WREG32_MC(R_00012C_MC_PT0_CONTEXT0_FLAT_BASE_ADDR,
 		  rdev->gart.table_addr);
-	WREG32_MC(RS600_MC_PT0_CONTEXT0_DEFAULT_READ_ADDR, 0);
+	WREG32_MC(R_00011C_MC_PT0_CONTEXT0_DEFAULT_READ_ADDR, 0);
 
 	/* enable page tables */
-	tmp = RREG32_MC(RS600_MC_PT0_CNTL);
-	WREG32_MC(RS600_MC_PT0_CNTL, (tmp | RS600_ENABLE_PT));
-	tmp = RREG32_MC(RS600_MC_CNTL1);
-	WREG32_MC(RS600_MC_CNTL1, (tmp | RS600_ENABLE_PAGE_TABLES));
+	tmp = RREG32_MC(R_000100_MC_PT0_CNTL);
+	WREG32_MC(R_000100_MC_PT0_CNTL, (tmp | S_000100_ENABLE_PT(1)));
+	tmp = RREG32_MC(R_000009_MC_CNTL1);
+	WREG32_MC(R_000009_MC_CNTL1, (tmp | S_000009_ENABLE_PAGE_TABLES(1)));
 	rs600_gart_tlb_flush(rdev);
 	rdev->gart.ready = true;
 	return 0;
@@ -146,10 +149,9 @@ void rs600_gart_disable(struct radeon_device *rdev)
 	uint32_t tmp;
 
 	/* FIXME: disable out of gart access */
-	WREG32_MC(RS600_MC_PT0_CNTL, 0);
-	tmp = RREG32_MC(RS600_MC_CNTL1);
-	tmp &= ~RS600_ENABLE_PAGE_TABLES;
-	WREG32_MC(RS600_MC_CNTL1, tmp);
+	WREG32_MC(R_000100_MC_PT0_CNTL, 0);
+	tmp = RREG32_MC(R_000009_MC_CNTL1);
+	WREG32_MC(R_000009_MC_CNTL1, tmp & C_000009_ENABLE_PAGE_TABLES);
 	if (rdev->gart.table.vram.robj) {
 		radeon_object_kunmap(rdev->gart.table.vram.robj);
 		radeon_object_unpin(rdev->gart.table.vram.robj);
@@ -183,125 +185,46 @@ int rs600_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr)
 	return 0;
 }
 
-
-/*
- * MC.
- */
-void rs600_mc_disable_clients(struct radeon_device *rdev)
-{
-	unsigned tmp;
-
-	if (r100_gui_wait_for_idle(rdev)) {
-		printk(KERN_WARNING "Failed to wait GUI idle while "
-		       "programming pipes. Bad things might happen.\n");
-	}
-
-	rv515_vga_render_disable(rdev);
-
-	tmp = RREG32(AVIVO_D1VGA_CONTROL);
-	WREG32(AVIVO_D1VGA_CONTROL, tmp & ~AVIVO_DVGA_CONTROL_MODE_ENABLE);
-	tmp = RREG32(AVIVO_D2VGA_CONTROL);
-	WREG32(AVIVO_D2VGA_CONTROL, tmp & ~AVIVO_DVGA_CONTROL_MODE_ENABLE);
-
-	tmp = RREG32(AVIVO_D1CRTC_CONTROL);
-	WREG32(AVIVO_D1CRTC_CONTROL, tmp & ~AVIVO_CRTC_EN);
-	tmp = RREG32(AVIVO_D2CRTC_CONTROL);
-	WREG32(AVIVO_D2CRTC_CONTROL, tmp & ~AVIVO_CRTC_EN);
-
-	/* make sure all previous write got through */
-	tmp = RREG32(AVIVO_D2CRTC_CONTROL);
-
-	mdelay(1);
-}
-
-int rs600_mc_init(struct radeon_device *rdev)
-{
-	uint32_t tmp;
-	int r;
-
-	if (r100_debugfs_rbbm_init(rdev)) {
-		DRM_ERROR("Failed to register debugfs file for RBBM !\n");
-	}
-
-	rs600_gpu_init(rdev);
-	rs600_gart_disable(rdev);
-
-	/* Setup GPU memory space */
-	rdev->mc.vram_location = 0xFFFFFFFFUL;
-	rdev->mc.gtt_location = 0xFFFFFFFFUL;
-	r = radeon_mc_setup(rdev);
-	if (r) {
-		return r;
-	}
-
-	/* Program GPU memory space */
-	/* Enable bus master */
-	tmp = RREG32(RADEON_BUS_CNTL) & ~RS600_BUS_MASTER_DIS;
-	WREG32(RADEON_BUS_CNTL, tmp);
-	/* FIXME: What does AGP means for such chipset ? */
-	WREG32_MC(RS600_MC_AGP_LOCATION, 0x0FFFFFFF);
-	/* FIXME: are this AGP reg in indirect MC range ? */
-	WREG32_MC(RS600_MC_AGP_BASE, 0);
-	WREG32_MC(RS600_MC_AGP_BASE_2, 0);
-	rs600_mc_disable_clients(rdev);
-	if (rs600_mc_wait_for_idle(rdev)) {
-		printk(KERN_WARNING "Failed to wait MC idle while "
-		       "programming pipes. Bad things might happen.\n");
-	}
-	tmp = rdev->mc.vram_location + rdev->mc.mc_vram_size - 1;
-	tmp = REG_SET(RS600_MC_FB_TOP, tmp >> 16);
-	tmp |= REG_SET(RS600_MC_FB_START, rdev->mc.vram_location >> 16);
-	WREG32_MC(RS600_MC_FB_LOCATION, tmp);
-	WREG32(RS690_HDP_FB_LOCATION, rdev->mc.vram_location >> 16);
-	return 0;
-}
-
-void rs600_mc_fini(struct radeon_device *rdev)
-{
-}
-
-
-/*
- * Interrupts
- */
 int rs600_irq_set(struct radeon_device *rdev)
 {
 	uint32_t tmp = 0;
 	uint32_t mode_int = 0;
 
 	if (rdev->irq.sw_int) {
-		tmp |= RADEON_SW_INT_ENABLE;
+		tmp |= S_000040_SW_INT_EN(1);
 	}
 	if (rdev->irq.crtc_vblank_int[0]) {
-		mode_int |= AVIVO_D1MODE_INT_MASK;
+		mode_int |= S_006540_D1MODE_VBLANK_INT_MASK(1);
 	}
 	if (rdev->irq.crtc_vblank_int[1]) {
-		mode_int |= AVIVO_D2MODE_INT_MASK;
+		mode_int |= S_006540_D2MODE_VBLANK_INT_MASK(1);
 	}
-	WREG32(RADEON_GEN_INT_CNTL, tmp);
-	WREG32(AVIVO_DxMODE_INT_MASK, mode_int);
+	WREG32(R_000040_GEN_INT_CNTL, tmp);
+	WREG32(R_006540_DxMODE_INT_MASK, mode_int);
 	return 0;
 }
 
 static inline uint32_t rs600_irq_ack(struct radeon_device *rdev, u32 *r500_disp_int)
 {
-	uint32_t irqs = RREG32(RADEON_GEN_INT_STATUS);
-	uint32_t irq_mask = RADEON_SW_INT_TEST;
+	uint32_t irqs = RREG32(R_000040_GEN_INT_CNTL);
+	uint32_t irq_mask = ~C_000040_SW_INT_EN;
 
-	if (irqs & AVIVO_DISPLAY_INT_STATUS) {
-		*r500_disp_int = RREG32(AVIVO_DISP_INTERRUPT_STATUS);
-		if (*r500_disp_int & AVIVO_D1_VBLANK_INTERRUPT) {
-			WREG32(AVIVO_D1MODE_VBLANK_STATUS, AVIVO_VBLANK_ACK);
+	if (G_000040_DISPLAY_INT_STATUS(irqs)) {
+		*r500_disp_int = RREG32(R_007EDC_DISP_INTERRUPT_STATUS);
+		if (G_007EDC_LB_D1_VBLANK_INTERRUPT(*r500_disp_int)) {
+			WREG32(R_006534_D1MODE_VBLANK_STATUS,
+				S_006534_D1MODE_VBLANK_ACK(1));
 		}
-		if (*r500_disp_int & AVIVO_D2_VBLANK_INTERRUPT) {
-			WREG32(AVIVO_D2MODE_VBLANK_STATUS, AVIVO_VBLANK_ACK);
+		if (G_007EDC_LB_D2_VBLANK_INTERRUPT(*r500_disp_int)) {
+			WREG32(R_006D34_D2MODE_VBLANK_STATUS,
+				S_006D34_D2MODE_VBLANK_ACK(1));
 		}
 	} else {
 		*r500_disp_int = 0;
 	}
 
 	if (irqs) {
-		WREG32(RADEON_GEN_INT_STATUS, irqs);
+		WREG32(R_000040_GEN_INT_CNTL, irqs);
 	}
 	return irqs & irq_mask;
 }
@@ -317,16 +240,13 @@ int rs600_irq_process(struct radeon_device *rdev)
 	}
 	while (status || r500_disp_int) {
 		/* SW interrupt */
-		if (status & RADEON_SW_INT_TEST) {
+		if (G_000040_SW_INT_EN(status))
 			radeon_fence_process(rdev);
-		}
 		/* Vertical blank interrupts */
-		if (r500_disp_int & AVIVO_D1_VBLANK_INTERRUPT) {
+		if (G_007EDC_LB_D1_VBLANK_INTERRUPT(r500_disp_int))
 			drm_handle_vblank(rdev->ddev, 0);
-		}
-		if (r500_disp_int & AVIVO_D2_VBLANK_INTERRUPT) {
+		if (G_007EDC_LB_D2_VBLANK_INTERRUPT(r500_disp_int))
 			drm_handle_vblank(rdev->ddev, 1);
-		}
 		status = rs600_irq_ack(rdev, &r500_disp_int);
 	}
 	return IRQ_HANDLED;
@@ -335,53 +255,34 @@ int rs600_irq_process(struct radeon_device *rdev)
 u32 rs600_get_vblank_counter(struct radeon_device *rdev, int crtc)
 {
 	if (crtc == 0)
-		return RREG32(AVIVO_D1CRTC_FRAME_COUNT);
+		return RREG32(R_0060A4_D1CRTC_STATUS_FRAME_COUNT);
 	else
-		return RREG32(AVIVO_D2CRTC_FRAME_COUNT);
+		return RREG32(R_0068A4_D2CRTC_STATUS_FRAME_COUNT);
 }
 
-
-/*
- * Global GPU functions
- */
 int rs600_mc_wait_for_idle(struct radeon_device *rdev)
 {
 	unsigned i;
-	uint32_t tmp;
 
 	for (i = 0; i < rdev->usec_timeout; i++) {
-		/* read MC_STATUS */
-		tmp = RREG32_MC(RS600_MC_STATUS);
-		if (tmp & RS600_MC_STATUS_IDLE) {
+		if (G_000000_MC_IDLE(RREG32_MC(R_000000_MC_STATUS)))
 			return 0;
-		}
-		DRM_UDELAY(1);
+		udelay(1);
 	}
 	return -1;
 }
 
-void rs600_errata(struct radeon_device *rdev)
-{
-	rdev->pll_errata = 0;
-}
-
 void rs600_gpu_init(struct radeon_device *rdev)
 {
 	/* FIXME: HDP same place on rs600 ? */
 	r100_hdp_reset(rdev);
-	rv515_vga_render_disable(rdev);
 	/* FIXME: is this correct ? */
 	r420_pipes_init(rdev);
-	if (rs600_mc_wait_for_idle(rdev)) {
-		printk(KERN_WARNING "Failed to wait MC idle while "
-		       "programming pipes. Bad things might happen.\n");
-	}
+	/* Wait for mc idle */
+	if (rs600_mc_wait_for_idle(rdev))
+		dev_warn(rdev->dev, "Wait MC idle timeout before updating MC.\n");
 }
 
-
-/*
- * VRAM info.
- */
 void rs600_vram_info(struct radeon_device *rdev)
 {
 	/* FIXME: to do or is these values sane ? */
@@ -394,26 +295,24 @@ void rs600_bandwidth_update(struct radeon_device *rdev)
 	/* FIXME: implement, should this be like rs690 ? */
 }
 
-
-/*
- * Indirect registers accessor
- */
 uint32_t rs600_mc_rreg(struct radeon_device *rdev, uint32_t reg)
 {
-	uint32_t r;
-
-	WREG32(RS600_MC_INDEX,
-	       ((reg & RS600_MC_ADDR_MASK) | RS600_MC_IND_CITF_ARB0));
-	r = RREG32(RS600_MC_DATA);
-	return r;
+	WREG32(R_000070_MC_IND_INDEX, S_000070_MC_IND_ADDR(reg) |
+		S_000070_MC_IND_CITF_ARB0(1));
+	return RREG32(R_000074_MC_IND_DATA);
 }
 
 void rs600_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
 {
-	WREG32(RS600_MC_INDEX,
-	       RS600_MC_IND_WR_EN | RS600_MC_IND_CITF_ARB0 |
-	       ((reg) & RS600_MC_ADDR_MASK));
-	WREG32(RS600_MC_DATA, v);
+	WREG32(R_000070_MC_IND_INDEX, S_000070_MC_IND_ADDR(reg) |
+		S_000070_MC_IND_CITF_ARB0(1) | S_000070_MC_IND_WR_EN(1));
+	WREG32(R_000074_MC_IND_DATA, v);
+}
+
+void rs600_debugfs(struct radeon_device *rdev)
+{
+	if (r100_debugfs_rbbm_init(rdev))
+		DRM_ERROR("Failed to register debugfs file for RBBM !\n");
 }
 
 void rs600_set_safe_registers(struct radeon_device *rdev)
@@ -422,8 +321,181 @@ void rs600_set_safe_registers(struct radeon_device *rdev)
 	rdev->config.r300.reg_safe_bm_size = ARRAY_SIZE(rs600_reg_safe_bm);
 }
 
+static void rs600_mc_program(struct radeon_device *rdev)
+{
+	struct rv515_mc_save save;
+
+	/* Stops all mc clients */
+	rv515_mc_stop(rdev, &save);
+
+	/* Wait for mc idle */
+	if (rs600_mc_wait_for_idle(rdev))
+		dev_warn(rdev->dev, "Wait MC idle timeout before updating MC.\n");
+
+	/* FIXME: What does AGP means for such chipset ? */
+	WREG32_MC(R_000005_MC_AGP_LOCATION, 0x0FFFFFFF);
+	WREG32_MC(R_000006_AGP_BASE, 0);
+	WREG32_MC(R_000007_AGP_BASE_2, 0);
+	/* Program MC */
+	WREG32_MC(R_000004_MC_FB_LOCATION,
+		S_000004_MC_FB_START(rdev->mc.vram_start >> 16) |
+		S_000004_MC_FB_TOP(rdev->mc.vram_end >> 16));
+	WREG32(R_000134_HDP_FB_LOCATION,
+		S_000134_HDP_FB_START(rdev->mc.vram_start >> 16));
+
+	rv515_mc_resume(rdev, &save);
+}
+
+static int rs600_startup(struct radeon_device *rdev)
+{
+	int r;
+
+	rs600_mc_program(rdev);
+	/* Resume clock */
+	rv515_clock_startup(rdev);
+	/* Initialize GPU configuration (# pipes, ...) */
+	rs600_gpu_init(rdev);
+	/* Initialize GART (initialize after TTM so we can allocate
+	 * memory through TTM but finalize after TTM) */
+	r = rs600_gart_enable(rdev);
+	if (r)
+		return r;
+	/* Enable IRQ */
+	rdev->irq.sw_int = true;
+	rs600_irq_set(rdev);
+	/* 1M ring buffer */
+	r = r100_cp_init(rdev, 1024 * 1024);
+	if (r) {
+		dev_err(rdev->dev, "failled initializing CP (%d).\n", r);
+		return r;
+	}
+	r = r100_wb_init(rdev);
+	if (r)
+		dev_err(rdev->dev, "failled initializing WB (%d).\n", r);
+	r = r100_ib_init(rdev);
+	if (r) {
+		dev_err(rdev->dev, "failled initializing IB (%d).\n", r);
+		return r;
+	}
+	return 0;
+}
+
+int rs600_resume(struct radeon_device *rdev)
+{
+	/* Make sur GART are not working */
+	rs600_gart_disable(rdev);
+	/* Resume clock before doing reset */
+	rv515_clock_startup(rdev);
+	/* Reset gpu before posting otherwise ATOM will enter infinite loop */
+	if (radeon_gpu_reset(rdev)) {
+		dev_warn(rdev->dev, "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
+			RREG32(R_000E40_RBBM_STATUS),
+			RREG32(R_0007C0_CP_STAT));
+	}
+	/* post */
+	atom_asic_init(rdev->mode_info.atom_context);
+	/* Resume clock after posting */
+	rv515_clock_startup(rdev);
+	return rs600_startup(rdev);
+}
+
+int rs600_suspend(struct radeon_device *rdev)
+{
+	r100_cp_disable(rdev);
+	r100_wb_disable(rdev);
+	r100_irq_disable(rdev);
+	rs600_gart_disable(rdev);
+	return 0;
+}
+
+void rs600_fini(struct radeon_device *rdev)
+{
+	rs600_suspend(rdev);
+	r100_cp_fini(rdev);
+	r100_wb_fini(rdev);
+	r100_ib_fini(rdev);
+	radeon_gem_fini(rdev);
+	rs600_gart_fini(rdev);
+	radeon_irq_kms_fini(rdev);
+	radeon_fence_driver_fini(rdev);
+	radeon_object_fini(rdev);
+	radeon_atombios_fini(rdev);
+	kfree(rdev->bios);
+	rdev->bios = NULL;
+}
+
 int rs600_init(struct radeon_device *rdev)
 {
-	rs600_set_safe_registers(rdev);
+	int r;
+
+	rdev->new_init_path = true;
+	/* Disable VGA */
+	rv515_vga_render_disable(rdev);
+	/* Initialize scratch registers */
+	radeon_scratch_init(rdev);
+	/* Initialize surface registers */
+	radeon_surface_init(rdev);
+	/* BIOS */
+	if (!radeon_get_bios(rdev)) {
+		if (ASIC_IS_AVIVO(rdev))
+			return -EINVAL;
+	}
+	if (rdev->is_atom_bios) {
+		r = radeon_atombios_init(rdev);
+		if (r)
+			return r;
+	} else {
+		dev_err(rdev->dev, "Expecting atombios for RS600 GPU\n");
+		return -EINVAL;
+	}
+	/* Reset gpu before posting otherwise ATOM will enter infinite loop */
+	if (radeon_gpu_reset(rdev)) {
+		dev_warn(rdev->dev,
+			"GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
+			RREG32(R_000E40_RBBM_STATUS),
+			RREG32(R_0007C0_CP_STAT));
+	}
+	/* check if cards are posted or not */
+	if (!radeon_card_posted(rdev) && rdev->bios) {
+		DRM_INFO("GPU not posted. posting now...\n");
+		atom_asic_init(rdev->mode_info.atom_context);
+	}
+	/* Initialize clocks */
+	radeon_get_clock_info(rdev->ddev);
+	/* Get vram informations */
+	rs600_vram_info(rdev);
+	/* Initialize memory controller (also test AGP) */
+	r = r420_mc_init(rdev);
+	if (r)
+		return r;
+	rs600_debugfs(rdev);
+	/* Fence driver */
+	r = radeon_fence_driver_init(rdev);
+	if (r)
+		return r;
+	r = radeon_irq_kms_init(rdev);
+	if (r)
+		return r;
+	/* Memory manager */
+	r = radeon_object_init(rdev);
+	if (r)
+		return r;
+	r = rs600_gart_init(rdev);
+	if (r)
+		return r;
+	rs600_set_safe_registers(rdev);
+	rdev->accel_working = true;
+	r = rs600_startup(rdev);
+	if (r) {
+		/* Somethings want wront with the accel init stop accel */
+		dev_err(rdev->dev, "Disabling GPU acceleration\n");
+		rs600_suspend(rdev);
+		r100_cp_fini(rdev);
+		r100_wb_fini(rdev);
+		r100_ib_fini(rdev);
+		rs600_gart_fini(rdev);
+		radeon_irq_kms_fini(rdev);
+		rdev->accel_working = false;
+	}
 	return 0;
 }