Diffstat (limited to 'drivers/char/drm/radeon_cp.c')
-rw-r--r-- | drivers/char/drm/radeon_cp.c | 1773
1 file changed, 0 insertions, 1773 deletions
diff --git a/drivers/char/drm/radeon_cp.c b/drivers/char/drm/radeon_cp.c
deleted file mode 100644
index e53158f0ecb5..000000000000
--- a/drivers/char/drm/radeon_cp.c
+++ /dev/null
@@ -1,1773 +0,0 @@
1 | /* radeon_cp.c -- CP support for Radeon -*- linux-c -*- */ | ||
2 | /* | ||
3 | * Copyright 2000 Precision Insight, Inc., Cedar Park, Texas. | ||
4 | * Copyright 2000 VA Linux Systems, Inc., Fremont, California. | ||
5 | * Copyright 2007 Advanced Micro Devices, Inc. | ||
6 | * All Rights Reserved. | ||
7 | * | ||
8 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
9 | * copy of this software and associated documentation files (the "Software"), | ||
10 | * to deal in the Software without restriction, including without limitation | ||
11 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
12 | * and/or sell copies of the Software, and to permit persons to whom the | ||
13 | * Software is furnished to do so, subject to the following conditions: | ||
14 | * | ||
15 | * The above copyright notice and this permission notice (including the next | ||
16 | * paragraph) shall be included in all copies or substantial portions of the | ||
17 | * Software. | ||
18 | * | ||
19 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
20 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
21 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
22 | * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR | ||
23 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | ||
24 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER | ||
25 | * DEALINGS IN THE SOFTWARE. | ||
26 | * | ||
27 | * Authors: | ||
28 | * Kevin E. Martin <martin@valinux.com> | ||
29 | * Gareth Hughes <gareth@valinux.com> | ||
30 | */ | ||
31 | |||
32 | #include "drmP.h" | ||
33 | #include "drm.h" | ||
34 | #include "radeon_drm.h" | ||
35 | #include "radeon_drv.h" | ||
36 | #include "r300_reg.h" | ||
37 | |||
38 | #include "radeon_microcode.h" | ||
39 | |||
40 | #define RADEON_FIFO_DEBUG 0 | ||
41 | |||
42 | static int radeon_do_cleanup_cp(struct drm_device * dev); | ||
43 | |||
44 | static u32 R500_READ_MCIND(drm_radeon_private_t *dev_priv, int addr) | ||
45 | { | ||
46 | u32 ret; | ||
47 | RADEON_WRITE(R520_MC_IND_INDEX, 0x7f0000 | (addr & 0xff)); | ||
48 | ret = RADEON_READ(R520_MC_IND_DATA); | ||
49 | RADEON_WRITE(R520_MC_IND_INDEX, 0); | ||
50 | return ret; | ||
51 | } | ||
52 | |||
53 | static u32 RS480_READ_MCIND(drm_radeon_private_t *dev_priv, int addr) | ||
54 | { | ||
55 | u32 ret; | ||
56 | RADEON_WRITE(RS480_NB_MC_INDEX, addr & 0xff); | ||
57 | ret = RADEON_READ(RS480_NB_MC_DATA); | ||
58 | RADEON_WRITE(RS480_NB_MC_INDEX, 0xff); | ||
59 | return ret; | ||
60 | } | ||
61 | |||
62 | static u32 RS690_READ_MCIND(drm_radeon_private_t *dev_priv, int addr) | ||
63 | { | ||
64 | u32 ret; | ||
65 | RADEON_WRITE(RS690_MC_INDEX, (addr & RS690_MC_INDEX_MASK)); | ||
66 | ret = RADEON_READ(RS690_MC_DATA); | ||
67 | RADEON_WRITE(RS690_MC_INDEX, RS690_MC_INDEX_MASK); | ||
68 | return ret; | ||
69 | } | ||
70 | |||
71 | static u32 IGP_READ_MCIND(drm_radeon_private_t *dev_priv, int addr) | ||
72 | { | ||
73 | if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS690) | ||
74 | return RS690_READ_MCIND(dev_priv, addr); | ||
75 | else | ||
76 | return RS480_READ_MCIND(dev_priv, addr); | ||
77 | } | ||
78 | |||
79 | u32 radeon_read_fb_location(drm_radeon_private_t *dev_priv) | ||
80 | { | ||
81 | |||
82 | if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV515) | ||
83 | return R500_READ_MCIND(dev_priv, RV515_MC_FB_LOCATION); | ||
84 | else if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS690) | ||
85 | return RS690_READ_MCIND(dev_priv, RS690_MC_FB_LOCATION); | ||
86 | else if ((dev_priv->flags & RADEON_FAMILY_MASK) > CHIP_RV515) | ||
87 | return R500_READ_MCIND(dev_priv, R520_MC_FB_LOCATION); | ||
88 | else | ||
89 | return RADEON_READ(RADEON_MC_FB_LOCATION); | ||
90 | } | ||
91 | |||
92 | static void radeon_write_fb_location(drm_radeon_private_t *dev_priv, u32 fb_loc) | ||
93 | { | ||
94 | if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV515) | ||
95 | R500_WRITE_MCIND(RV515_MC_FB_LOCATION, fb_loc); | ||
96 | else if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS690) | ||
97 | RS690_WRITE_MCIND(RS690_MC_FB_LOCATION, fb_loc); | ||
98 | else if ((dev_priv->flags & RADEON_FAMILY_MASK) > CHIP_RV515) | ||
99 | R500_WRITE_MCIND(R520_MC_FB_LOCATION, fb_loc); | ||
100 | else | ||
101 | RADEON_WRITE(RADEON_MC_FB_LOCATION, fb_loc); | ||
102 | } | ||
103 | |||
104 | static void radeon_write_agp_location(drm_radeon_private_t *dev_priv, u32 agp_loc) | ||
105 | { | ||
106 | if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV515) | ||
107 | R500_WRITE_MCIND(RV515_MC_AGP_LOCATION, agp_loc); | ||
108 | else if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS690) | ||
109 | RS690_WRITE_MCIND(RS690_MC_AGP_LOCATION, agp_loc); | ||
110 | else if ((dev_priv->flags & RADEON_FAMILY_MASK) > CHIP_RV515) | ||
111 | R500_WRITE_MCIND(R520_MC_AGP_LOCATION, agp_loc); | ||
112 | else | ||
113 | RADEON_WRITE(RADEON_MC_AGP_LOCATION, agp_loc); | ||
114 | } | ||
115 | |||
116 | static void radeon_write_agp_base(drm_radeon_private_t *dev_priv, u64 agp_base) | ||
117 | { | ||
118 | u32 agp_base_hi = upper_32_bits(agp_base); | ||
119 | u32 agp_base_lo = agp_base & 0xffffffff; | ||
120 | |||
121 | if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV515) { | ||
122 | R500_WRITE_MCIND(RV515_MC_AGP_BASE, agp_base_lo); | ||
123 | R500_WRITE_MCIND(RV515_MC_AGP_BASE_2, agp_base_hi); | ||
124 | } else if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS690) { | ||
125 | RS690_WRITE_MCIND(RS690_MC_AGP_BASE, agp_base_lo); | ||
126 | RS690_WRITE_MCIND(RS690_MC_AGP_BASE_2, agp_base_hi); | ||
127 | } else if ((dev_priv->flags & RADEON_FAMILY_MASK) > CHIP_RV515) { | ||
128 | R500_WRITE_MCIND(R520_MC_AGP_BASE, agp_base_lo); | ||
129 | R500_WRITE_MCIND(R520_MC_AGP_BASE_2, agp_base_hi); | ||
130 | } else if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS480) { | ||
131 | RADEON_WRITE(RADEON_AGP_BASE, agp_base_lo); | ||
132 | RADEON_WRITE(RS480_AGP_BASE_2, 0); | ||
133 | } else { | ||
134 | RADEON_WRITE(RADEON_AGP_BASE, agp_base_lo); | ||
135 | if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R200) | ||
136 | RADEON_WRITE(RADEON_AGP_BASE_2, agp_base_hi); | ||
137 | } | ||
138 | } | ||
139 | |||
140 | static int RADEON_READ_PLL(struct drm_device * dev, int addr) | ||
141 | { | ||
142 | drm_radeon_private_t *dev_priv = dev->dev_private; | ||
143 | |||
144 | RADEON_WRITE8(RADEON_CLOCK_CNTL_INDEX, addr & 0x1f); | ||
145 | return RADEON_READ(RADEON_CLOCK_CNTL_DATA); | ||
146 | } | ||
147 | |||
148 | static u32 RADEON_READ_PCIE(drm_radeon_private_t *dev_priv, int addr) | ||
149 | { | ||
150 | RADEON_WRITE8(RADEON_PCIE_INDEX, addr & 0xff); | ||
151 | return RADEON_READ(RADEON_PCIE_DATA); | ||
152 | } | ||
153 | |||
154 | #if RADEON_FIFO_DEBUG | ||
155 | static void radeon_status(drm_radeon_private_t * dev_priv) | ||
156 | { | ||
157 | printk("%s:\n", __func__); | ||
158 | printk("RBBM_STATUS = 0x%08x\n", | ||
159 | (unsigned int)RADEON_READ(RADEON_RBBM_STATUS)); | ||
160 | printk("CP_RB_RTPR = 0x%08x\n", | ||
161 | (unsigned int)RADEON_READ(RADEON_CP_RB_RPTR)); | ||
162 | printk("CP_RB_WTPR = 0x%08x\n", | ||
163 | (unsigned int)RADEON_READ(RADEON_CP_RB_WPTR)); | ||
164 | printk("AIC_CNTL = 0x%08x\n", | ||
165 | (unsigned int)RADEON_READ(RADEON_AIC_CNTL)); | ||
166 | printk("AIC_STAT = 0x%08x\n", | ||
167 | (unsigned int)RADEON_READ(RADEON_AIC_STAT)); | ||
168 | printk("AIC_PT_BASE = 0x%08x\n", | ||
169 | (unsigned int)RADEON_READ(RADEON_AIC_PT_BASE)); | ||
170 | printk("TLB_ADDR = 0x%08x\n", | ||
171 | (unsigned int)RADEON_READ(RADEON_AIC_TLB_ADDR)); | ||
172 | printk("TLB_DATA = 0x%08x\n", | ||
173 | (unsigned int)RADEON_READ(RADEON_AIC_TLB_DATA)); | ||
174 | } | ||
175 | #endif | ||
176 | |||
177 | /* ================================================================ | ||
178 | * Engine, FIFO control | ||
179 | */ | ||
180 | |||
181 | static int radeon_do_pixcache_flush(drm_radeon_private_t * dev_priv) | ||
182 | { | ||
183 | u32 tmp; | ||
184 | int i; | ||
185 | |||
186 | dev_priv->stats.boxes |= RADEON_BOX_WAIT_IDLE; | ||
187 | |||
188 | if ((dev_priv->flags & RADEON_FAMILY_MASK) <= CHIP_RV280) { | ||
189 | tmp = RADEON_READ(RADEON_RB3D_DSTCACHE_CTLSTAT); | ||
190 | tmp |= RADEON_RB3D_DC_FLUSH_ALL; | ||
191 | RADEON_WRITE(RADEON_RB3D_DSTCACHE_CTLSTAT, tmp); | ||
192 | |||
193 | for (i = 0; i < dev_priv->usec_timeout; i++) { | ||
194 | if (!(RADEON_READ(RADEON_RB3D_DSTCACHE_CTLSTAT) | ||
195 | & RADEON_RB3D_DC_BUSY)) { | ||
196 | return 0; | ||
197 | } | ||
198 | DRM_UDELAY(1); | ||
199 | } | ||
200 | } else { | ||
201 | /* 3D */ | ||
202 | tmp = RADEON_READ(R300_RB3D_DSTCACHE_CTLSTAT); | ||
203 | tmp |= RADEON_RB3D_DC_FLUSH_ALL; | ||
204 | RADEON_WRITE(R300_RB3D_DSTCACHE_CTLSTAT, tmp); | ||
205 | |||
206 | /* 2D */ | ||
207 | tmp = RADEON_READ(R300_DSTCACHE_CTLSTAT); | ||
208 | tmp |= RADEON_RB3D_DC_FLUSH_ALL; | ||
209 | RADEON_WRITE(R300_DSTCACHE_CTLSTAT, tmp); | ||
210 | |||
211 | for (i = 0; i < dev_priv->usec_timeout; i++) { | ||
212 | if (!(RADEON_READ(R300_DSTCACHE_CTLSTAT) | ||
213 | & RADEON_RB3D_DC_BUSY)) { | ||
214 | return 0; | ||
215 | } | ||
216 | DRM_UDELAY(1); | ||
217 | } | ||
218 | } | ||
219 | |||
220 | #if RADEON_FIFO_DEBUG | ||
221 | DRM_ERROR("failed!\n"); | ||
222 | radeon_status(dev_priv); | ||
223 | #endif | ||
224 | return -EBUSY; | ||
225 | } | ||
226 | |||
227 | static int radeon_do_wait_for_fifo(drm_radeon_private_t * dev_priv, int entries) | ||
228 | { | ||
229 | int i; | ||
230 | |||
231 | dev_priv->stats.boxes |= RADEON_BOX_WAIT_IDLE; | ||
232 | |||
233 | for (i = 0; i < dev_priv->usec_timeout; i++) { | ||
234 | int slots = (RADEON_READ(RADEON_RBBM_STATUS) | ||
235 | & RADEON_RBBM_FIFOCNT_MASK); | ||
236 | if (slots >= entries) | ||
237 | return 0; | ||
238 | DRM_UDELAY(1); | ||
239 | } | ||
240 | |||
241 | #if RADEON_FIFO_DEBUG | ||
242 | DRM_ERROR("failed!\n"); | ||
243 | radeon_status(dev_priv); | ||
244 | #endif | ||
245 | return -EBUSY; | ||
246 | } | ||
247 | |||
248 | static int radeon_do_wait_for_idle(drm_radeon_private_t * dev_priv) | ||
249 | { | ||
250 | int i, ret; | ||
251 | |||
252 | dev_priv->stats.boxes |= RADEON_BOX_WAIT_IDLE; | ||
253 | |||
254 | ret = radeon_do_wait_for_fifo(dev_priv, 64); | ||
255 | if (ret) | ||
256 | return ret; | ||
257 | |||
258 | for (i = 0; i < dev_priv->usec_timeout; i++) { | ||
259 | if (!(RADEON_READ(RADEON_RBBM_STATUS) | ||
260 | & RADEON_RBBM_ACTIVE)) { | ||
261 | radeon_do_pixcache_flush(dev_priv); | ||
262 | return 0; | ||
263 | } | ||
264 | DRM_UDELAY(1); | ||
265 | } | ||
266 | |||
267 | #if RADEON_FIFO_DEBUG | ||
268 | DRM_ERROR("failed!\n"); | ||
269 | radeon_status(dev_priv); | ||
270 | #endif | ||
271 | return -EBUSY; | ||
272 | } | ||
273 | |||
274 | static void radeon_init_pipes(drm_radeon_private_t *dev_priv) | ||
275 | { | ||
276 | uint32_t gb_tile_config, gb_pipe_sel = 0; | ||
277 | |||
278 | /* RS4xx/RS6xx/R4xx/R5xx */ | ||
279 | if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R420) { | ||
280 | gb_pipe_sel = RADEON_READ(R400_GB_PIPE_SELECT); | ||
281 | dev_priv->num_gb_pipes = ((gb_pipe_sel >> 12) & 0x3) + 1; | ||
282 | } else { | ||
283 | /* R3xx */ | ||
284 | if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_R300) || | ||
285 | ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_R350)) { | ||
286 | dev_priv->num_gb_pipes = 2; | ||
287 | } else { | ||
288 | /* R3Vxx */ | ||
289 | dev_priv->num_gb_pipes = 1; | ||
290 | } | ||
291 | } | ||
292 | DRM_INFO("Num pipes: %d\n", dev_priv->num_gb_pipes); | ||
293 | |||
294 | gb_tile_config = (R300_ENABLE_TILING | R300_TILE_SIZE_16 /*| R300_SUBPIXEL_1_16*/); | ||
295 | |||
296 | switch (dev_priv->num_gb_pipes) { | ||
297 | case 2: gb_tile_config |= R300_PIPE_COUNT_R300; break; | ||
298 | case 3: gb_tile_config |= R300_PIPE_COUNT_R420_3P; break; | ||
299 | case 4: gb_tile_config |= R300_PIPE_COUNT_R420; break; | ||
300 | default: | ||
301 | case 1: gb_tile_config |= R300_PIPE_COUNT_RV350; break; | ||
302 | } | ||
303 | |||
304 | if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RV515) { | ||
305 | RADEON_WRITE_PLL(R500_DYN_SCLK_PWMEM_PIPE, (1 | ((gb_pipe_sel >> 8) & 0xf) << 4)); | ||
306 | RADEON_WRITE(R500_SU_REG_DEST, ((1 << dev_priv->num_gb_pipes) - 1)); | ||
307 | } | ||
308 | RADEON_WRITE(R300_GB_TILE_CONFIG, gb_tile_config); | ||
309 | radeon_do_wait_for_idle(dev_priv); | ||
310 | RADEON_WRITE(R300_DST_PIPE_CONFIG, RADEON_READ(R300_DST_PIPE_CONFIG) | R300_PIPE_AUTO_CONFIG); | ||
311 | RADEON_WRITE(R300_RB2D_DSTCACHE_MODE, (RADEON_READ(R300_RB2D_DSTCACHE_MODE) | | ||
312 | R300_DC_AUTOFLUSH_ENABLE | | ||
313 | R300_DC_DC_DISABLE_IGNORE_PE)); | ||
314 | |||
315 | |||
316 | } | ||
317 | |||
318 | /* ================================================================ | ||
319 | * CP control, initialization | ||
320 | */ | ||
321 | |||
322 | /* Load the microcode for the CP */ | ||
323 | static void radeon_cp_load_microcode(drm_radeon_private_t * dev_priv) | ||
324 | { | ||
325 | int i; | ||
326 | DRM_DEBUG("\n"); | ||
327 | |||
328 | radeon_do_wait_for_idle(dev_priv); | ||
329 | |||
330 | RADEON_WRITE(RADEON_CP_ME_RAM_ADDR, 0); | ||
331 | if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_R100) || | ||
332 | ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV100) || | ||
333 | ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV200) || | ||
334 | ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS100) || | ||
335 | ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS200)) { | ||
336 | DRM_INFO("Loading R100 Microcode\n"); | ||
337 | for (i = 0; i < 256; i++) { | ||
338 | RADEON_WRITE(RADEON_CP_ME_RAM_DATAH, | ||
339 | R100_cp_microcode[i][1]); | ||
340 | RADEON_WRITE(RADEON_CP_ME_RAM_DATAL, | ||
341 | R100_cp_microcode[i][0]); | ||
342 | } | ||
343 | } else if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_R200) || | ||
344 | ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV250) || | ||
345 | ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV280) || | ||
346 | ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS300)) { | ||
347 | DRM_INFO("Loading R200 Microcode\n"); | ||
348 | for (i = 0; i < 256; i++) { | ||
349 | RADEON_WRITE(RADEON_CP_ME_RAM_DATAH, | ||
350 | R200_cp_microcode[i][1]); | ||
351 | RADEON_WRITE(RADEON_CP_ME_RAM_DATAL, | ||
352 | R200_cp_microcode[i][0]); | ||
353 | } | ||
354 | } else if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_R300) || | ||
355 | ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_R350) || | ||
356 | ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV350) || | ||
357 | ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV380) || | ||
358 | ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS480)) { | ||
359 | DRM_INFO("Loading R300 Microcode\n"); | ||
360 | for (i = 0; i < 256; i++) { | ||
361 | RADEON_WRITE(RADEON_CP_ME_RAM_DATAH, | ||
362 | R300_cp_microcode[i][1]); | ||
363 | RADEON_WRITE(RADEON_CP_ME_RAM_DATAL, | ||
364 | R300_cp_microcode[i][0]); | ||
365 | } | ||
366 | } else if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_R420) || | ||
367 | ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV410)) { | ||
368 | DRM_INFO("Loading R400 Microcode\n"); | ||
369 | for (i = 0; i < 256; i++) { | ||
370 | RADEON_WRITE(RADEON_CP_ME_RAM_DATAH, | ||
371 | R420_cp_microcode[i][1]); | ||
372 | RADEON_WRITE(RADEON_CP_ME_RAM_DATAL, | ||
373 | R420_cp_microcode[i][0]); | ||
374 | } | ||
375 | } else if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS690) { | ||
376 | DRM_INFO("Loading RS690 Microcode\n"); | ||
377 | for (i = 0; i < 256; i++) { | ||
378 | RADEON_WRITE(RADEON_CP_ME_RAM_DATAH, | ||
379 | RS690_cp_microcode[i][1]); | ||
380 | RADEON_WRITE(RADEON_CP_ME_RAM_DATAL, | ||
381 | RS690_cp_microcode[i][0]); | ||
382 | } | ||
383 | } else if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV515) || | ||
384 | ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_R520) || | ||
385 | ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV530) || | ||
386 | ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_R580) || | ||
387 | ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV560) || | ||
388 | ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV570)) { | ||
389 | DRM_INFO("Loading R500 Microcode\n"); | ||
390 | for (i = 0; i < 256; i++) { | ||
391 | RADEON_WRITE(RADEON_CP_ME_RAM_DATAH, | ||
392 | R520_cp_microcode[i][1]); | ||
393 | RADEON_WRITE(RADEON_CP_ME_RAM_DATAL, | ||
394 | R520_cp_microcode[i][0]); | ||
395 | } | ||
396 | } | ||
397 | } | ||
398 | |||
399 | /* Flush any pending commands to the CP. This should only be used just | ||
400 | * prior to a wait for idle, as it informs the engine that the command | ||
401 | * stream is ending. | ||
402 | */ | ||
403 | static void radeon_do_cp_flush(drm_radeon_private_t * dev_priv) | ||
404 | { | ||
405 | DRM_DEBUG("\n"); | ||
406 | #if 0 | ||
407 | u32 tmp; | ||
408 | |||
409 | tmp = RADEON_READ(RADEON_CP_RB_WPTR) | (1 << 31); | ||
410 | RADEON_WRITE(RADEON_CP_RB_WPTR, tmp); | ||
411 | #endif | ||
412 | } | ||
413 | |||
414 | /* Wait for the CP to go idle. | ||
415 | */ | ||
416 | int radeon_do_cp_idle(drm_radeon_private_t * dev_priv) | ||
417 | { | ||
418 | RING_LOCALS; | ||
419 | DRM_DEBUG("\n"); | ||
420 | |||
421 | BEGIN_RING(6); | ||
422 | |||
423 | RADEON_PURGE_CACHE(); | ||
424 | RADEON_PURGE_ZCACHE(); | ||
425 | RADEON_WAIT_UNTIL_IDLE(); | ||
426 | |||
427 | ADVANCE_RING(); | ||
428 | COMMIT_RING(); | ||
429 | |||
430 | return radeon_do_wait_for_idle(dev_priv); | ||
431 | } | ||
432 | |||
433 | /* Start the Command Processor. | ||
434 | */ | ||
435 | static void radeon_do_cp_start(drm_radeon_private_t * dev_priv) | ||
436 | { | ||
437 | RING_LOCALS; | ||
438 | DRM_DEBUG("\n"); | ||
439 | |||
440 | radeon_do_wait_for_idle(dev_priv); | ||
441 | |||
442 | RADEON_WRITE(RADEON_CP_CSQ_CNTL, dev_priv->cp_mode); | ||
443 | |||
444 | dev_priv->cp_running = 1; | ||
445 | |||
446 | BEGIN_RING(6); | ||
447 | |||
448 | RADEON_PURGE_CACHE(); | ||
449 | RADEON_PURGE_ZCACHE(); | ||
450 | RADEON_WAIT_UNTIL_IDLE(); | ||
451 | |||
452 | ADVANCE_RING(); | ||
453 | COMMIT_RING(); | ||
454 | } | ||
455 | |||
456 | /* Reset the Command Processor. This will not flush any pending | ||
457 | * commands, so you must wait for the CP command stream to complete | ||
458 | * before calling this routine. | ||
459 | */ | ||
460 | static void radeon_do_cp_reset(drm_radeon_private_t * dev_priv) | ||
461 | { | ||
462 | u32 cur_read_ptr; | ||
463 | DRM_DEBUG("\n"); | ||
464 | |||
465 | cur_read_ptr = RADEON_READ(RADEON_CP_RB_RPTR); | ||
466 | RADEON_WRITE(RADEON_CP_RB_WPTR, cur_read_ptr); | ||
467 | SET_RING_HEAD(dev_priv, cur_read_ptr); | ||
468 | dev_priv->ring.tail = cur_read_ptr; | ||
469 | } | ||
470 | |||
471 | /* Stop the Command Processor. This will not flush any pending | ||
472 | * commands, so you must flush the command stream and wait for the CP | ||
473 | * to go idle before calling this routine. | ||
474 | */ | ||
475 | static void radeon_do_cp_stop(drm_radeon_private_t * dev_priv) | ||
476 | { | ||
477 | DRM_DEBUG("\n"); | ||
478 | |||
479 | RADEON_WRITE(RADEON_CP_CSQ_CNTL, RADEON_CSQ_PRIDIS_INDDIS); | ||
480 | |||
481 | dev_priv->cp_running = 0; | ||
482 | } | ||
483 | |||
484 | /* Reset the engine. This will stop the CP if it is running. | ||
485 | */ | ||
486 | static int radeon_do_engine_reset(struct drm_device * dev) | ||
487 | { | ||
488 | drm_radeon_private_t *dev_priv = dev->dev_private; | ||
489 | u32 clock_cntl_index = 0, mclk_cntl = 0, rbbm_soft_reset; | ||
490 | DRM_DEBUG("\n"); | ||
491 | |||
492 | radeon_do_pixcache_flush(dev_priv); | ||
493 | |||
494 | if ((dev_priv->flags & RADEON_FAMILY_MASK) <= CHIP_RV410) { | ||
495 | /* may need something similar for newer chips */ | ||
496 | clock_cntl_index = RADEON_READ(RADEON_CLOCK_CNTL_INDEX); | ||
497 | mclk_cntl = RADEON_READ_PLL(dev, RADEON_MCLK_CNTL); | ||
498 | |||
499 | RADEON_WRITE_PLL(RADEON_MCLK_CNTL, (mclk_cntl | | ||
500 | RADEON_FORCEON_MCLKA | | ||
501 | RADEON_FORCEON_MCLKB | | ||
502 | RADEON_FORCEON_YCLKA | | ||
503 | RADEON_FORCEON_YCLKB | | ||
504 | RADEON_FORCEON_MC | | ||
505 | RADEON_FORCEON_AIC)); | ||
506 | } | ||
507 | |||
508 | rbbm_soft_reset = RADEON_READ(RADEON_RBBM_SOFT_RESET); | ||
509 | |||
510 | RADEON_WRITE(RADEON_RBBM_SOFT_RESET, (rbbm_soft_reset | | ||
511 | RADEON_SOFT_RESET_CP | | ||
512 | RADEON_SOFT_RESET_HI | | ||
513 | RADEON_SOFT_RESET_SE | | ||
514 | RADEON_SOFT_RESET_RE | | ||
515 | RADEON_SOFT_RESET_PP | | ||
516 | RADEON_SOFT_RESET_E2 | | ||
517 | RADEON_SOFT_RESET_RB)); | ||
518 | RADEON_READ(RADEON_RBBM_SOFT_RESET); | ||
519 | RADEON_WRITE(RADEON_RBBM_SOFT_RESET, (rbbm_soft_reset & | ||
520 | ~(RADEON_SOFT_RESET_CP | | ||
521 | RADEON_SOFT_RESET_HI | | ||
522 | RADEON_SOFT_RESET_SE | | ||
523 | RADEON_SOFT_RESET_RE | | ||
524 | RADEON_SOFT_RESET_PP | | ||
525 | RADEON_SOFT_RESET_E2 | | ||
526 | RADEON_SOFT_RESET_RB))); | ||
527 | RADEON_READ(RADEON_RBBM_SOFT_RESET); | ||
528 | |||
529 | if ((dev_priv->flags & RADEON_FAMILY_MASK) <= CHIP_RV410) { | ||
530 | RADEON_WRITE_PLL(RADEON_MCLK_CNTL, mclk_cntl); | ||
531 | RADEON_WRITE(RADEON_CLOCK_CNTL_INDEX, clock_cntl_index); | ||
532 | RADEON_WRITE(RADEON_RBBM_SOFT_RESET, rbbm_soft_reset); | ||
533 | } | ||
534 | |||
535 | /* setup the raster pipes */ | ||
536 | if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R300) | ||
537 | radeon_init_pipes(dev_priv); | ||
538 | |||
539 | /* Reset the CP ring */ | ||
540 | radeon_do_cp_reset(dev_priv); | ||
541 | |||
542 | /* The CP is no longer running after an engine reset */ | ||
543 | dev_priv->cp_running = 0; | ||
544 | |||
545 | /* Reset any pending vertex, indirect buffers */ | ||
546 | radeon_freelist_reset(dev); | ||
547 | |||
548 | return 0; | ||
549 | } | ||
550 | |||
551 | static void radeon_cp_init_ring_buffer(struct drm_device * dev, | ||
552 | drm_radeon_private_t * dev_priv) | ||
553 | { | ||
554 | u32 ring_start, cur_read_ptr; | ||
555 | u32 tmp; | ||
556 | |||
557 | /* Initialize the memory controller. With new memory map, the fb location | ||
558 | * is not changed, it should have been properly initialized already. Part | ||
559 | * of the problem is that the code below is bogus, assuming the GART is | ||
560 | * always appended to the fb which is not necessarily the case | ||
561 | */ | ||
562 | if (!dev_priv->new_memmap) | ||
563 | radeon_write_fb_location(dev_priv, | ||
564 | ((dev_priv->gart_vm_start - 1) & 0xffff0000) | ||
565 | | (dev_priv->fb_location >> 16)); | ||
566 | |||
567 | #if __OS_HAS_AGP | ||
568 | if (dev_priv->flags & RADEON_IS_AGP) { | ||
569 | radeon_write_agp_base(dev_priv, dev->agp->base); | ||
570 | |||
571 | radeon_write_agp_location(dev_priv, | ||
572 | (((dev_priv->gart_vm_start - 1 + | ||
573 | dev_priv->gart_size) & 0xffff0000) | | ||
574 | (dev_priv->gart_vm_start >> 16))); | ||
575 | |||
576 | ring_start = (dev_priv->cp_ring->offset | ||
577 | - dev->agp->base | ||
578 | + dev_priv->gart_vm_start); | ||
579 | } else | ||
580 | #endif | ||
581 | ring_start = (dev_priv->cp_ring->offset | ||
582 | - (unsigned long)dev->sg->virtual | ||
583 | + dev_priv->gart_vm_start); | ||
584 | |||
585 | RADEON_WRITE(RADEON_CP_RB_BASE, ring_start); | ||
586 | |||
587 | /* Set the write pointer delay */ | ||
588 | RADEON_WRITE(RADEON_CP_RB_WPTR_DELAY, 0); | ||
589 | |||
590 | /* Initialize the ring buffer's read and write pointers */ | ||
591 | cur_read_ptr = RADEON_READ(RADEON_CP_RB_RPTR); | ||
592 | RADEON_WRITE(RADEON_CP_RB_WPTR, cur_read_ptr); | ||
593 | SET_RING_HEAD(dev_priv, cur_read_ptr); | ||
594 | dev_priv->ring.tail = cur_read_ptr; | ||
595 | |||
596 | #if __OS_HAS_AGP | ||
597 | if (dev_priv->flags & RADEON_IS_AGP) { | ||
598 | RADEON_WRITE(RADEON_CP_RB_RPTR_ADDR, | ||
599 | dev_priv->ring_rptr->offset | ||
600 | - dev->agp->base + dev_priv->gart_vm_start); | ||
601 | } else | ||
602 | #endif | ||
603 | { | ||
604 | struct drm_sg_mem *entry = dev->sg; | ||
605 | unsigned long tmp_ofs, page_ofs; | ||
606 | |||
607 | tmp_ofs = dev_priv->ring_rptr->offset - | ||
608 | (unsigned long)dev->sg->virtual; | ||
609 | page_ofs = tmp_ofs >> PAGE_SHIFT; | ||
610 | |||
611 | RADEON_WRITE(RADEON_CP_RB_RPTR_ADDR, entry->busaddr[page_ofs]); | ||
612 | DRM_DEBUG("ring rptr: offset=0x%08lx handle=0x%08lx\n", | ||
613 | (unsigned long)entry->busaddr[page_ofs], | ||
614 | entry->handle + tmp_ofs); | ||
615 | } | ||
616 | |||
617 | /* Set ring buffer size */ | ||
618 | #ifdef __BIG_ENDIAN | ||
619 | RADEON_WRITE(RADEON_CP_RB_CNTL, | ||
620 | RADEON_BUF_SWAP_32BIT | | ||
621 | (dev_priv->ring.fetch_size_l2ow << 18) | | ||
622 | (dev_priv->ring.rptr_update_l2qw << 8) | | ||
623 | dev_priv->ring.size_l2qw); | ||
624 | #else | ||
625 | RADEON_WRITE(RADEON_CP_RB_CNTL, | ||
626 | (dev_priv->ring.fetch_size_l2ow << 18) | | ||
627 | (dev_priv->ring.rptr_update_l2qw << 8) | | ||
628 | dev_priv->ring.size_l2qw); | ||
629 | #endif | ||
630 | |||
631 | /* Start with assuming that writeback doesn't work */ | ||
632 | dev_priv->writeback_works = 0; | ||
633 | |||
634 | /* Initialize the scratch register pointer. This will cause | ||
635 | * the scratch register values to be written out to memory | ||
636 | * whenever they are updated. | ||
637 | * | ||
638 | * We simply put this behind the ring read pointer, this works | ||
639 | * with PCI GART as well as (whatever kind of) AGP GART | ||
640 | */ | ||
641 | RADEON_WRITE(RADEON_SCRATCH_ADDR, RADEON_READ(RADEON_CP_RB_RPTR_ADDR) | ||
642 | + RADEON_SCRATCH_REG_OFFSET); | ||
643 | |||
644 | dev_priv->scratch = ((__volatile__ u32 *) | ||
645 | dev_priv->ring_rptr->handle + | ||
646 | (RADEON_SCRATCH_REG_OFFSET / sizeof(u32))); | ||
647 | |||
648 | RADEON_WRITE(RADEON_SCRATCH_UMSK, 0x7); | ||
649 | |||
650 | /* Turn on bus mastering */ | ||
651 | tmp = RADEON_READ(RADEON_BUS_CNTL) & ~RADEON_BUS_MASTER_DIS; | ||
652 | RADEON_WRITE(RADEON_BUS_CNTL, tmp); | ||
653 | |||
654 | dev_priv->sarea_priv->last_frame = dev_priv->scratch[0] = 0; | ||
655 | RADEON_WRITE(RADEON_LAST_FRAME_REG, dev_priv->sarea_priv->last_frame); | ||
656 | |||
657 | dev_priv->sarea_priv->last_dispatch = dev_priv->scratch[1] = 0; | ||
658 | RADEON_WRITE(RADEON_LAST_DISPATCH_REG, | ||
659 | dev_priv->sarea_priv->last_dispatch); | ||
660 | |||
661 | dev_priv->sarea_priv->last_clear = dev_priv->scratch[2] = 0; | ||
662 | RADEON_WRITE(RADEON_LAST_CLEAR_REG, dev_priv->sarea_priv->last_clear); | ||
663 | |||
664 | radeon_do_wait_for_idle(dev_priv); | ||
665 | |||
666 | /* Sync everything up */ | ||
667 | RADEON_WRITE(RADEON_ISYNC_CNTL, | ||
668 | (RADEON_ISYNC_ANY2D_IDLE3D | | ||
669 | RADEON_ISYNC_ANY3D_IDLE2D | | ||
670 | RADEON_ISYNC_WAIT_IDLEGUI | | ||
671 | RADEON_ISYNC_CPSCRATCH_IDLEGUI)); | ||
672 | |||
673 | } | ||
674 | |||
675 | static void radeon_test_writeback(drm_radeon_private_t * dev_priv) | ||
676 | { | ||
677 | u32 tmp; | ||
678 | |||
679 | /* Writeback doesn't seem to work everywhere, test it here and possibly | ||
680 | * enable it if it appears to work | ||
681 | */ | ||
682 | DRM_WRITE32(dev_priv->ring_rptr, RADEON_SCRATCHOFF(1), 0); | ||
683 | RADEON_WRITE(RADEON_SCRATCH_REG1, 0xdeadbeef); | ||
684 | |||
685 | for (tmp = 0; tmp < dev_priv->usec_timeout; tmp++) { | ||
686 | if (DRM_READ32(dev_priv->ring_rptr, RADEON_SCRATCHOFF(1)) == | ||
687 | 0xdeadbeef) | ||
688 | break; | ||
689 | DRM_UDELAY(1); | ||
690 | } | ||
691 | |||
692 | if (tmp < dev_priv->usec_timeout) { | ||
693 | dev_priv->writeback_works = 1; | ||
694 | DRM_INFO("writeback test succeeded in %d usecs\n", tmp); | ||
695 | } else { | ||
696 | dev_priv->writeback_works = 0; | ||
697 | DRM_INFO("writeback test failed\n"); | ||
698 | } | ||
699 | if (radeon_no_wb == 1) { | ||
700 | dev_priv->writeback_works = 0; | ||
701 | DRM_INFO("writeback forced off\n"); | ||
702 | } | ||
703 | |||
704 | if (!dev_priv->writeback_works) { | ||
705 | /* Disable writeback to avoid unnecessary bus master transfer */ | ||
706 | RADEON_WRITE(RADEON_CP_RB_CNTL, RADEON_READ(RADEON_CP_RB_CNTL) | | ||
707 | RADEON_RB_NO_UPDATE); | ||
708 | RADEON_WRITE(RADEON_SCRATCH_UMSK, 0); | ||
709 | } | ||
710 | } | ||
711 | |||
712 | /* Enable or disable IGP GART on the chip */ | ||
713 | static void radeon_set_igpgart(drm_radeon_private_t * dev_priv, int on) | ||
714 | { | ||
715 | u32 temp; | ||
716 | |||
717 | if (on) { | ||
718 | DRM_DEBUG("programming igp gart %08X %08lX %08X\n", | ||
719 | dev_priv->gart_vm_start, | ||
720 | (long)dev_priv->gart_info.bus_addr, | ||
721 | dev_priv->gart_size); | ||
722 | |||
723 | temp = IGP_READ_MCIND(dev_priv, RS480_MC_MISC_CNTL); | ||
724 | if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS690) | ||
725 | IGP_WRITE_MCIND(RS480_MC_MISC_CNTL, (RS480_GART_INDEX_REG_EN | | ||
726 | RS690_BLOCK_GFX_D3_EN)); | ||
727 | else | ||
728 | IGP_WRITE_MCIND(RS480_MC_MISC_CNTL, RS480_GART_INDEX_REG_EN); | ||
729 | |||
730 | IGP_WRITE_MCIND(RS480_AGP_ADDRESS_SPACE_SIZE, (RS480_GART_EN | | ||
731 | RS480_VA_SIZE_32MB)); | ||
732 | |||
733 | temp = IGP_READ_MCIND(dev_priv, RS480_GART_FEATURE_ID); | ||
734 | IGP_WRITE_MCIND(RS480_GART_FEATURE_ID, (RS480_HANG_EN | | ||
735 | RS480_TLB_ENABLE | | ||
736 | RS480_GTW_LAC_EN | | ||
737 | RS480_1LEVEL_GART)); | ||
738 | |||
739 | temp = dev_priv->gart_info.bus_addr & 0xfffff000; | ||
740 | temp |= (upper_32_bits(dev_priv->gart_info.bus_addr) & 0xff) << 4; | ||
741 | IGP_WRITE_MCIND(RS480_GART_BASE, temp); | ||
742 | |||
743 | temp = IGP_READ_MCIND(dev_priv, RS480_AGP_MODE_CNTL); | ||
744 | IGP_WRITE_MCIND(RS480_AGP_MODE_CNTL, ((1 << RS480_REQ_TYPE_SNOOP_SHIFT) | | ||
745 | RS480_REQ_TYPE_SNOOP_DIS)); | ||
746 | |||
747 | radeon_write_agp_base(dev_priv, dev_priv->gart_vm_start); | ||
748 | |||
749 | dev_priv->gart_size = 32*1024*1024; | ||
750 | temp = (((dev_priv->gart_vm_start - 1 + dev_priv->gart_size) & | ||
751 | 0xffff0000) | (dev_priv->gart_vm_start >> 16)); | ||
752 | |||
753 | radeon_write_agp_location(dev_priv, temp); | ||
754 | |||
755 | temp = IGP_READ_MCIND(dev_priv, RS480_AGP_ADDRESS_SPACE_SIZE); | ||
756 | IGP_WRITE_MCIND(RS480_AGP_ADDRESS_SPACE_SIZE, (RS480_GART_EN | | ||
757 | RS480_VA_SIZE_32MB)); | ||
758 | |||
759 | do { | ||
760 | temp = IGP_READ_MCIND(dev_priv, RS480_GART_CACHE_CNTRL); | ||
761 | if ((temp & RS480_GART_CACHE_INVALIDATE) == 0) | ||
762 | break; | ||
763 | DRM_UDELAY(1); | ||
764 | } while (1); | ||
765 | |||
766 | IGP_WRITE_MCIND(RS480_GART_CACHE_CNTRL, | ||
767 | RS480_GART_CACHE_INVALIDATE); | ||
768 | |||
769 | do { | ||
770 | temp = IGP_READ_MCIND(dev_priv, RS480_GART_CACHE_CNTRL); | ||
771 | if ((temp & RS480_GART_CACHE_INVALIDATE) == 0) | ||
772 | break; | ||
773 | DRM_UDELAY(1); | ||
774 | } while (1); | ||
775 | |||
776 | IGP_WRITE_MCIND(RS480_GART_CACHE_CNTRL, 0); | ||
777 | } else { | ||
778 | IGP_WRITE_MCIND(RS480_AGP_ADDRESS_SPACE_SIZE, 0); | ||
779 | } | ||
780 | } | ||
781 | |||
782 | static void radeon_set_pciegart(drm_radeon_private_t * dev_priv, int on) | ||
783 | { | ||
784 | u32 tmp = RADEON_READ_PCIE(dev_priv, RADEON_PCIE_TX_GART_CNTL); | ||
785 | if (on) { | ||
786 | |||
787 | DRM_DEBUG("programming pcie %08X %08lX %08X\n", | ||
788 | dev_priv->gart_vm_start, | ||
789 | (long)dev_priv->gart_info.bus_addr, | ||
790 | dev_priv->gart_size); | ||
791 | RADEON_WRITE_PCIE(RADEON_PCIE_TX_DISCARD_RD_ADDR_LO, | ||
792 | dev_priv->gart_vm_start); | ||
793 | RADEON_WRITE_PCIE(RADEON_PCIE_TX_GART_BASE, | ||
794 | dev_priv->gart_info.bus_addr); | ||
795 | RADEON_WRITE_PCIE(RADEON_PCIE_TX_GART_START_LO, | ||
796 | dev_priv->gart_vm_start); | ||
797 | RADEON_WRITE_PCIE(RADEON_PCIE_TX_GART_END_LO, | ||
798 | dev_priv->gart_vm_start + | ||
799 | dev_priv->gart_size - 1); | ||
800 | |||
801 | radeon_write_agp_location(dev_priv, 0xffffffc0); /* ?? */ | ||
802 | |||
803 | RADEON_WRITE_PCIE(RADEON_PCIE_TX_GART_CNTL, | ||
804 | RADEON_PCIE_TX_GART_EN); | ||
805 | } else { | ||
806 | RADEON_WRITE_PCIE(RADEON_PCIE_TX_GART_CNTL, | ||
807 | tmp & ~RADEON_PCIE_TX_GART_EN); | ||
808 | } | ||
809 | } | ||
810 | |||
811 | /* Enable or disable PCI GART on the chip */ | ||
812 | static void radeon_set_pcigart(drm_radeon_private_t * dev_priv, int on) | ||
813 | { | ||
814 | u32 tmp; | ||
815 | |||
816 | if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS690) || | ||
817 | (dev_priv->flags & RADEON_IS_IGPGART)) { | ||
818 | radeon_set_igpgart(dev_priv, on); | ||
819 | return; | ||
820 | } | ||
821 | |||
822 | if (dev_priv->flags & RADEON_IS_PCIE) { | ||
823 | radeon_set_pciegart(dev_priv, on); | ||
824 | return; | ||
825 | } | ||
826 | |||
827 | tmp = RADEON_READ(RADEON_AIC_CNTL); | ||
828 | |||
829 | if (on) { | ||
830 | RADEON_WRITE(RADEON_AIC_CNTL, | ||
831 | tmp | RADEON_PCIGART_TRANSLATE_EN); | ||
832 | |||
833 | /* set PCI GART page-table base address | ||
834 | */ | ||
835 | RADEON_WRITE(RADEON_AIC_PT_BASE, dev_priv->gart_info.bus_addr); | ||
836 | |||
837 | /* set address range for PCI address translate | ||
838 | */ | ||
839 | RADEON_WRITE(RADEON_AIC_LO_ADDR, dev_priv->gart_vm_start); | ||
840 | RADEON_WRITE(RADEON_AIC_HI_ADDR, dev_priv->gart_vm_start | ||
841 | + dev_priv->gart_size - 1); | ||
842 | |||
843 | /* Turn off AGP aperture -- is this required for PCI GART? | ||
844 | */ | ||
845 | radeon_write_agp_location(dev_priv, 0xffffffc0); | ||
846 | RADEON_WRITE(RADEON_AGP_COMMAND, 0); /* clear AGP_COMMAND */ | ||
847 | } else { | ||
848 | RADEON_WRITE(RADEON_AIC_CNTL, | ||
849 | tmp & ~RADEON_PCIGART_TRANSLATE_EN); | ||
850 | } | ||
851 | } | ||
852 | |||
853 | static int radeon_do_init_cp(struct drm_device * dev, drm_radeon_init_t * init) | ||
854 | { | ||
855 | drm_radeon_private_t *dev_priv = dev->dev_private; | ||
856 | |||
857 | DRM_DEBUG("\n"); | ||
858 | |||
859 | /* if we require new memory map but we don't have it fail */ | ||
860 | if ((dev_priv->flags & RADEON_NEW_MEMMAP) && !dev_priv->new_memmap) { | ||
861 | DRM_ERROR("Cannot initialise DRM on this card\nThis card requires a new X.org DDX for 3D\n"); | ||
862 | radeon_do_cleanup_cp(dev); | ||
863 | return -EINVAL; | ||
864 | } | ||
865 | |||
866 | if (init->is_pci && (dev_priv->flags & RADEON_IS_AGP)) { | ||
867 | DRM_DEBUG("Forcing AGP card to PCI mode\n"); | ||
868 | dev_priv->flags &= ~RADEON_IS_AGP; | ||
869 | } else if (!(dev_priv->flags & (RADEON_IS_AGP | RADEON_IS_PCI | RADEON_IS_PCIE)) | ||
870 | && !init->is_pci) { | ||
871 | DRM_DEBUG("Restoring AGP flag\n"); | ||
872 | dev_priv->flags |= RADEON_IS_AGP; | ||
873 | } | ||
874 | |||
875 | if ((!(dev_priv->flags & RADEON_IS_AGP)) && !dev->sg) { | ||
876 | DRM_ERROR("PCI GART memory not allocated!\n"); | ||
877 | radeon_do_cleanup_cp(dev); | ||
878 | return -EINVAL; | ||
879 | } | ||
880 | |||
881 | dev_priv->usec_timeout = init->usec_timeout; | ||
882 | if (dev_priv->usec_timeout < 1 || | ||
883 | dev_priv->usec_timeout > RADEON_MAX_USEC_TIMEOUT) { | ||
884 | DRM_DEBUG("TIMEOUT problem!\n"); | ||
885 | radeon_do_cleanup_cp(dev); | ||
886 | return -EINVAL; | ||
887 | } | ||
888 | |||
889 | /* Enable vblank on CRTC1 for older X servers | ||
890 | */ | ||
891 | dev_priv->vblank_crtc = DRM_RADEON_VBLANK_CRTC1; | ||
892 | |||
893 | switch(init->func) { | ||
894 | case RADEON_INIT_R200_CP: | ||
895 | dev_priv->microcode_version = UCODE_R200; | ||
896 | break; | ||
897 | case RADEON_INIT_R300_CP: | ||
898 | dev_priv->microcode_version = UCODE_R300; | ||
899 | break; | ||
900 | default: | ||
901 | dev_priv->microcode_version = UCODE_R100; | ||
902 | } | ||
903 | |||
904 | dev_priv->do_boxes = 0; | ||
905 | dev_priv->cp_mode = init->cp_mode; | ||
906 | |||
907 | /* We don't support anything other than bus-mastering ring mode, | ||
908 | * but the ring can be in either AGP or PCI space for the ring | ||
909 | * read pointer. | ||
910 | */ | ||
911 | if ((init->cp_mode != RADEON_CSQ_PRIBM_INDDIS) && | ||
912 | (init->cp_mode != RADEON_CSQ_PRIBM_INDBM)) { | ||
913 | DRM_DEBUG("BAD cp_mode (%x)!\n", init->cp_mode); | ||
914 | radeon_do_cleanup_cp(dev); | ||
915 | return -EINVAL; | ||
916 | } | ||
917 | |||
918 | switch (init->fb_bpp) { | ||
919 | case 16: | ||
920 | dev_priv->color_fmt = RADEON_COLOR_FORMAT_RGB565; | ||
921 | break; | ||
922 | case 32: | ||
923 | default: | ||
924 | dev_priv->color_fmt = RADEON_COLOR_FORMAT_ARGB8888; | ||
925 | break; | ||
926 | } | ||
927 | dev_priv->front_offset = init->front_offset; | ||
928 | dev_priv->front_pitch = init->front_pitch; | ||
929 | dev_priv->back_offset = init->back_offset; | ||
930 | dev_priv->back_pitch = init->back_pitch; | ||
931 | |||
932 | switch (init->depth_bpp) { | ||
933 | case 16: | ||
934 | dev_priv->depth_fmt = RADEON_DEPTH_FORMAT_16BIT_INT_Z; | ||
935 | break; | ||
936 | case 32: | ||
937 | default: | ||
938 | dev_priv->depth_fmt = RADEON_DEPTH_FORMAT_24BIT_INT_Z; | ||
939 | break; | ||
940 | } | ||
941 | dev_priv->depth_offset = init->depth_offset; | ||
942 | dev_priv->depth_pitch = init->depth_pitch; | ||
943 | |||
944 | /* Hardware state for depth clears. Remove this if/when we no | ||
945 | * longer clear the depth buffer with a 3D rectangle. Hard-code | ||
946 | * all values to prevent unwanted 3D state from slipping through | ||
947 | * and screwing with the clear operation. | ||
948 | */ | ||
949 | dev_priv->depth_clear.rb3d_cntl = (RADEON_PLANE_MASK_ENABLE | | ||
950 | (dev_priv->color_fmt << 10) | | ||
951 | (dev_priv->microcode_version == | ||
952 | UCODE_R100 ? RADEON_ZBLOCK16 : 0)); | ||
953 | |||
954 | dev_priv->depth_clear.rb3d_zstencilcntl = | ||
955 | (dev_priv->depth_fmt | | ||
956 | RADEON_Z_TEST_ALWAYS | | ||
957 | RADEON_STENCIL_TEST_ALWAYS | | ||
958 | RADEON_STENCIL_S_FAIL_REPLACE | | ||
959 | RADEON_STENCIL_ZPASS_REPLACE | | ||
960 | RADEON_STENCIL_ZFAIL_REPLACE | RADEON_Z_WRITE_ENABLE); | ||
961 | |||
962 | dev_priv->depth_clear.se_cntl = (RADEON_FFACE_CULL_CW | | ||
963 | RADEON_BFACE_SOLID | | ||
964 | RADEON_FFACE_SOLID | | ||
965 | RADEON_FLAT_SHADE_VTX_LAST | | ||
966 | RADEON_DIFFUSE_SHADE_FLAT | | ||
967 | RADEON_ALPHA_SHADE_FLAT | | ||
968 | RADEON_SPECULAR_SHADE_FLAT | | ||
969 | RADEON_FOG_SHADE_FLAT | | ||
970 | RADEON_VTX_PIX_CENTER_OGL | | ||
971 | RADEON_ROUND_MODE_TRUNC | | ||
972 | RADEON_ROUND_PREC_8TH_PIX); | ||
973 | |||
974 | |||
975 | dev_priv->ring_offset = init->ring_offset; | ||
976 | dev_priv->ring_rptr_offset = init->ring_rptr_offset; | ||
977 | dev_priv->buffers_offset = init->buffers_offset; | ||
978 | dev_priv->gart_textures_offset = init->gart_textures_offset; | ||
979 | |||
980 | dev_priv->sarea = drm_getsarea(dev); | ||
981 | if (!dev_priv->sarea) { | ||
982 | DRM_ERROR("could not find sarea!\n"); | ||
983 | radeon_do_cleanup_cp(dev); | ||
984 | return -EINVAL; | ||
985 | } | ||
986 | |||
987 | dev_priv->cp_ring = drm_core_findmap(dev, init->ring_offset); | ||
988 | if (!dev_priv->cp_ring) { | ||
989 | DRM_ERROR("could not find cp ring region!\n"); | ||
990 | radeon_do_cleanup_cp(dev); | ||
991 | return -EINVAL; | ||
992 | } | ||
993 | dev_priv->ring_rptr = drm_core_findmap(dev, init->ring_rptr_offset); | ||
994 | if (!dev_priv->ring_rptr) { | ||
995 | DRM_ERROR("could not find ring read pointer!\n"); | ||
996 | radeon_do_cleanup_cp(dev); | ||
997 | return -EINVAL; | ||
998 | } | ||
999 | dev->agp_buffer_token = init->buffers_offset; | ||
1000 | dev->agp_buffer_map = drm_core_findmap(dev, init->buffers_offset); | ||
1001 | if (!dev->agp_buffer_map) { | ||
1002 | DRM_ERROR("could not find dma buffer region!\n"); | ||
1003 | radeon_do_cleanup_cp(dev); | ||
1004 | return -EINVAL; | ||
1005 | } | ||
1006 | |||
1007 | if (init->gart_textures_offset) { | ||
1008 | dev_priv->gart_textures = | ||
1009 | drm_core_findmap(dev, init->gart_textures_offset); | ||
1010 | if (!dev_priv->gart_textures) { | ||
1011 | DRM_ERROR("could not find GART texture region!\n"); | ||
1012 | radeon_do_cleanup_cp(dev); | ||
1013 | return -EINVAL; | ||
1014 | } | ||
1015 | } | ||
1016 | |||
1017 | dev_priv->sarea_priv = | ||
1018 | (drm_radeon_sarea_t *) ((u8 *) dev_priv->sarea->handle + | ||
1019 | init->sarea_priv_offset); | ||
1020 | |||
1021 | #if __OS_HAS_AGP | ||
1022 | if (dev_priv->flags & RADEON_IS_AGP) { | ||
1023 | drm_core_ioremap(dev_priv->cp_ring, dev); | ||
1024 | drm_core_ioremap(dev_priv->ring_rptr, dev); | ||
1025 | drm_core_ioremap(dev->agp_buffer_map, dev); | ||
1026 | if (!dev_priv->cp_ring->handle || | ||
1027 | !dev_priv->ring_rptr->handle || | ||
1028 | !dev->agp_buffer_map->handle) { | ||
1029 | DRM_ERROR("could not find ioremap agp regions!\n"); | ||
1030 | radeon_do_cleanup_cp(dev); | ||
1031 | return -EINVAL; | ||
1032 | } | ||
1033 | } else | ||
1034 | #endif | ||
1035 | { | ||
1036 | dev_priv->cp_ring->handle = (void *)dev_priv->cp_ring->offset; | ||
1037 | dev_priv->ring_rptr->handle = | ||
1038 | (void *)dev_priv->ring_rptr->offset; | ||
1039 | dev->agp_buffer_map->handle = | ||
1040 | (void *)dev->agp_buffer_map->offset; | ||
1041 | |||
1042 | DRM_DEBUG("dev_priv->cp_ring->handle %p\n", | ||
1043 | dev_priv->cp_ring->handle); | ||
1044 | DRM_DEBUG("dev_priv->ring_rptr->handle %p\n", | ||
1045 | dev_priv->ring_rptr->handle); | ||
1046 | DRM_DEBUG("dev->agp_buffer_map->handle %p\n", | ||
1047 | dev->agp_buffer_map->handle); | ||
1048 | } | ||
1049 | |||
1050 | dev_priv->fb_location = (radeon_read_fb_location(dev_priv) & 0xffff) << 16; | ||
1051 | dev_priv->fb_size = | ||
1052 | ((radeon_read_fb_location(dev_priv) & 0xffff0000u) + 0x10000) | ||
1053 | - dev_priv->fb_location; | ||
1054 | |||
1055 | dev_priv->front_pitch_offset = (((dev_priv->front_pitch / 64) << 22) | | ||
1056 | ((dev_priv->front_offset | ||
1057 | + dev_priv->fb_location) >> 10)); | ||
1058 | |||
1059 | dev_priv->back_pitch_offset = (((dev_priv->back_pitch / 64) << 22) | | ||
1060 | ((dev_priv->back_offset | ||
1061 | + dev_priv->fb_location) >> 10)); | ||
1062 | |||
1063 | dev_priv->depth_pitch_offset = (((dev_priv->depth_pitch / 64) << 22) | | ||
1064 | ((dev_priv->depth_offset | ||
1065 | + dev_priv->fb_location) >> 10)); | ||
1066 | |||
1067 | dev_priv->gart_size = init->gart_size; | ||
1068 | |||
1069 | /* Now let's set the memory map ... */ | ||
1070 | if (dev_priv->new_memmap) { | ||
1071 | u32 base = 0; | ||
1072 | |||
1073 | DRM_INFO("Setting GART location based on new memory map\n"); | ||
1074 | |||
1075 | /* If using AGP, try to locate the AGP aperture at the same | ||
1076 | * location in the card and on the bus, though we have to | ||
1077 | * align it down. | ||
1078 | */ | ||
1079 | #if __OS_HAS_AGP | ||
1080 | if (dev_priv->flags & RADEON_IS_AGP) { | ||
1081 | base = dev->agp->base; | ||
1082 | /* Check if valid */ | ||
1083 | if ((base + dev_priv->gart_size - 1) >= dev_priv->fb_location && | ||
1084 | base < (dev_priv->fb_location + dev_priv->fb_size - 1)) { | ||
1085 | DRM_INFO("Can't use AGP base @0x%08lx, won't fit\n", | ||
1086 | dev->agp->base); | ||
1087 | base = 0; | ||
1088 | } | ||
1089 | } | ||
1090 | #endif | ||
1091 | /* If not or if AGP is at 0 (Macs), try to put it elsewhere */ | ||
1092 | if (base == 0) { | ||
1093 | base = dev_priv->fb_location + dev_priv->fb_size; | ||
1094 | if (base < dev_priv->fb_location || | ||
1095 | ((base + dev_priv->gart_size) & 0xfffffffful) < base) | ||
1096 | base = dev_priv->fb_location | ||
1097 | - dev_priv->gart_size; | ||
1098 | } | ||
1099 | dev_priv->gart_vm_start = base & 0xffc00000u; | ||
1100 | if (dev_priv->gart_vm_start != base) | ||
1101 | DRM_INFO("GART aligned down from 0x%08x to 0x%08x\n", | ||
1102 | base, dev_priv->gart_vm_start); | ||
1103 | } else { | ||
1104 | DRM_INFO("Setting GART location based on old memory map\n"); | ||
1105 | dev_priv->gart_vm_start = dev_priv->fb_location + | ||
1106 | RADEON_READ(RADEON_CONFIG_APER_SIZE); | ||
1107 | } | ||
1108 | |||
1109 | #if __OS_HAS_AGP | ||
1110 | if (dev_priv->flags & RADEON_IS_AGP) | ||
1111 | dev_priv->gart_buffers_offset = (dev->agp_buffer_map->offset | ||
1112 | - dev->agp->base | ||
1113 | + dev_priv->gart_vm_start); | ||
1114 | else | ||
1115 | #endif | ||
1116 | dev_priv->gart_buffers_offset = (dev->agp_buffer_map->offset | ||
1117 | - (unsigned long)dev->sg->virtual | ||
1118 | + dev_priv->gart_vm_start); | ||
1119 | |||
1120 | DRM_DEBUG("dev_priv->gart_size %d\n", dev_priv->gart_size); | ||
1121 | DRM_DEBUG("dev_priv->gart_vm_start 0x%x\n", dev_priv->gart_vm_start); | ||
1122 | DRM_DEBUG("dev_priv->gart_buffers_offset 0x%lx\n", | ||
1123 | dev_priv->gart_buffers_offset); | ||
1124 | |||
1125 | dev_priv->ring.start = (u32 *) dev_priv->cp_ring->handle; | ||
1126 | dev_priv->ring.end = ((u32 *) dev_priv->cp_ring->handle | ||
1127 | + init->ring_size / sizeof(u32)); | ||
1128 | dev_priv->ring.size = init->ring_size; | ||
1129 | dev_priv->ring.size_l2qw = drm_order(init->ring_size / 8); | ||
1130 | |||
1131 | dev_priv->ring.rptr_update = /* init->rptr_update */ 4096; | ||
1132 | dev_priv->ring.rptr_update_l2qw = drm_order( /* init->rptr_update */ 4096 / 8); | ||
1133 | |||
1134 | dev_priv->ring.fetch_size = /* init->fetch_size */ 32; | ||
1135 | dev_priv->ring.fetch_size_l2ow = drm_order( /* init->fetch_size */ 32 / 16); | ||
1136 | dev_priv->ring.tail_mask = (dev_priv->ring.size / sizeof(u32)) - 1; | ||
1137 | |||
1138 | dev_priv->ring.high_mark = RADEON_RING_HIGH_MARK; | ||
1139 | |||
1140 | #if __OS_HAS_AGP | ||
1141 | if (dev_priv->flags & RADEON_IS_AGP) { | ||
1142 | /* Turn off PCI GART */ | ||
1143 | radeon_set_pcigart(dev_priv, 0); | ||
1144 | } else | ||
1145 | #endif | ||
1146 | { | ||
1147 | dev_priv->gart_info.table_mask = DMA_BIT_MASK(32); | ||
1148 | /* if we have an offset set from userspace */ | ||
1149 | if (dev_priv->pcigart_offset_set) { | ||
1150 | dev_priv->gart_info.bus_addr = | ||
1151 | dev_priv->pcigart_offset + dev_priv->fb_location; | ||
1152 | dev_priv->gart_info.mapping.offset = | ||
1153 | dev_priv->pcigart_offset + dev_priv->fb_aper_offset; | ||
1154 | dev_priv->gart_info.mapping.size = | ||
1155 | dev_priv->gart_info.table_size; | ||
1156 | |||
1157 | drm_core_ioremap(&dev_priv->gart_info.mapping, dev); | ||
1158 | dev_priv->gart_info.addr = | ||
1159 | dev_priv->gart_info.mapping.handle; | ||
1160 | |||
1161 | if (dev_priv->flags & RADEON_IS_PCIE) | ||
1162 | dev_priv->gart_info.gart_reg_if = DRM_ATI_GART_PCIE; | ||
1163 | else | ||
1164 | dev_priv->gart_info.gart_reg_if = DRM_ATI_GART_PCI; | ||
1165 | dev_priv->gart_info.gart_table_location = | ||
1166 | DRM_ATI_GART_FB; | ||
1167 | |||
1168 | DRM_DEBUG("Setting phys_pci_gart to %p %08lX\n", | ||
1169 | dev_priv->gart_info.addr, | ||
1170 | dev_priv->pcigart_offset); | ||
1171 | } else { | ||
1172 | if (dev_priv->flags & RADEON_IS_IGPGART) | ||
1173 | dev_priv->gart_info.gart_reg_if = DRM_ATI_GART_IGP; | ||
1174 | else | ||
1175 | dev_priv->gart_info.gart_reg_if = DRM_ATI_GART_PCI; | ||
1176 | dev_priv->gart_info.gart_table_location = | ||
1177 | DRM_ATI_GART_MAIN; | ||
1178 | dev_priv->gart_info.addr = NULL; | ||
1179 | dev_priv->gart_info.bus_addr = 0; | ||
1180 | if (dev_priv->flags & RADEON_IS_PCIE) { | ||
1181 | DRM_ERROR | ||
1182 | ("Cannot use PCI Express without GART in FB memory\n"); | ||
1183 | radeon_do_cleanup_cp(dev); | ||
1184 | return -EINVAL; | ||
1185 | } | ||
1186 | } | ||
1187 | |||
1188 | if (!drm_ati_pcigart_init(dev, &dev_priv->gart_info)) { | ||
1189 | DRM_ERROR("failed to init PCI GART!\n"); | ||
1190 | radeon_do_cleanup_cp(dev); | ||
1191 | return -ENOMEM; | ||
1192 | } | ||
1193 | |||
1194 | /* Turn on PCI GART */ | ||
1195 | radeon_set_pcigart(dev_priv, 1); | ||
1196 | } | ||
1197 | |||
1198 | radeon_cp_load_microcode(dev_priv); | ||
1199 | radeon_cp_init_ring_buffer(dev, dev_priv); | ||
1200 | |||
1201 | dev_priv->last_buf = 0; | ||
1202 | |||
1203 | radeon_do_engine_reset(dev); | ||
1204 | radeon_test_writeback(dev_priv); | ||
1205 | |||
1206 | return 0; | ||
1207 | } | ||
1208 | |||
1209 | static int radeon_do_cleanup_cp(struct drm_device * dev) | ||
1210 | { | ||
1211 | drm_radeon_private_t *dev_priv = dev->dev_private; | ||
1212 | DRM_DEBUG("\n"); | ||
1213 | |||
1214 | /* Make sure interrupts are disabled here because the uninstall ioctl | ||
1215 | * may not have been called from userspace and after dev_private | ||
1216 | * is freed, it's too late. | ||
1217 | */ | ||
1218 | if (dev->irq_enabled) | ||
1219 | drm_irq_uninstall(dev); | ||
1220 | |||
1221 | #if __OS_HAS_AGP | ||
1222 | if (dev_priv->flags & RADEON_IS_AGP) { | ||
1223 | if (dev_priv->cp_ring != NULL) { | ||
1224 | drm_core_ioremapfree(dev_priv->cp_ring, dev); | ||
1225 | dev_priv->cp_ring = NULL; | ||
1226 | } | ||
1227 | if (dev_priv->ring_rptr != NULL) { | ||
1228 | drm_core_ioremapfree(dev_priv->ring_rptr, dev); | ||
1229 | dev_priv->ring_rptr = NULL; | ||
1230 | } | ||
1231 | if (dev->agp_buffer_map != NULL) { | ||
1232 | drm_core_ioremapfree(dev->agp_buffer_map, dev); | ||
1233 | dev->agp_buffer_map = NULL; | ||
1234 | } | ||
1235 | } else | ||
1236 | #endif | ||
1237 | { | ||
1238 | |||
1239 | if (dev_priv->gart_info.bus_addr) { | ||
1240 | /* Turn off PCI GART */ | ||
1241 | radeon_set_pcigart(dev_priv, 0); | ||
1242 | if (!drm_ati_pcigart_cleanup(dev, &dev_priv->gart_info)) | ||
1243 | DRM_ERROR("failed to cleanup PCI GART!\n"); | ||
1244 | } | ||
1245 | |||
1246 | if (dev_priv->gart_info.gart_table_location == DRM_ATI_GART_FB) | ||
1247 | { | ||
1248 | drm_core_ioremapfree(&dev_priv->gart_info.mapping, dev); | ||
1249 | dev_priv->gart_info.addr = 0; | ||
1250 | } | ||
1251 | } | ||
1252 | /* only clear to the start of flags */ | ||
1253 | memset(dev_priv, 0, offsetof(drm_radeon_private_t, flags)); | ||
1254 | |||
1255 | return 0; | ||
1256 | } | ||
1257 | |||
1258 | /* This code will reinit the Radeon CP hardware after a resume from disc. | ||
1259 | * AFAIK, it would be very difficult to pickle the state at suspend time, so | ||
1260 | * here we make sure that all Radeon hardware initialisation is re-done without | ||
1261 | * affecting running applications. | ||
1262 | * | ||
1263 | * Charl P. Botha <http://cpbotha.net> | ||
1264 | */ | ||
1265 | static int radeon_do_resume_cp(struct drm_device * dev) | ||
1266 | { | ||
1267 | drm_radeon_private_t *dev_priv = dev->dev_private; | ||
1268 | |||
1269 | if (!dev_priv) { | ||
1270 | DRM_ERROR("Called with no initialization\n"); | ||
1271 | return -EINVAL; | ||
1272 | } | ||
1273 | |||
1274 | DRM_DEBUG("Starting radeon_do_resume_cp()\n"); | ||
1275 | |||
1276 | #if __OS_HAS_AGP | ||
1277 | if (dev_priv->flags & RADEON_IS_AGP) { | ||
1278 | /* Turn off PCI GART */ | ||
1279 | radeon_set_pcigart(dev_priv, 0); | ||
1280 | } else | ||
1281 | #endif | ||
1282 | { | ||
1283 | /* Turn on PCI GART */ | ||
1284 | radeon_set_pcigart(dev_priv, 1); | ||
1285 | } | ||
1286 | |||
1287 | radeon_cp_load_microcode(dev_priv); | ||
1288 | radeon_cp_init_ring_buffer(dev, dev_priv); | ||
1289 | |||
1290 | radeon_do_engine_reset(dev); | ||
1291 | radeon_enable_interrupt(dev); | ||
1292 | |||
1293 | DRM_DEBUG("radeon_do_resume_cp() complete\n"); | ||
1294 | |||
1295 | return 0; | ||
1296 | } | ||
1297 | |||
1298 | int radeon_cp_init(struct drm_device *dev, void *data, struct drm_file *file_priv) | ||
1299 | { | ||
1300 | drm_radeon_init_t *init = data; | ||
1301 | |||
1302 | LOCK_TEST_WITH_RETURN(dev, file_priv); | ||
1303 | |||
1304 | if (init->func == RADEON_INIT_R300_CP) | ||
1305 | r300_init_reg_flags(dev); | ||
1306 | |||
1307 | switch (init->func) { | ||
1308 | case RADEON_INIT_CP: | ||
1309 | case RADEON_INIT_R200_CP: | ||
1310 | case RADEON_INIT_R300_CP: | ||
1311 | return radeon_do_init_cp(dev, init); | ||
1312 | case RADEON_CLEANUP_CP: | ||
1313 | return radeon_do_cleanup_cp(dev); | ||
1314 | } | ||
1315 | |||
1316 | return -EINVAL; | ||
1317 | } | ||
1318 | |||
1319 | int radeon_cp_start(struct drm_device *dev, void *data, struct drm_file *file_priv) | ||
1320 | { | ||
1321 | drm_radeon_private_t *dev_priv = dev->dev_private; | ||
1322 | DRM_DEBUG("\n"); | ||
1323 | |||
1324 | LOCK_TEST_WITH_RETURN(dev, file_priv); | ||
1325 | |||
1326 | if (dev_priv->cp_running) { | ||
1327 | DRM_DEBUG("while CP running\n"); | ||
1328 | return 0; | ||
1329 | } | ||
1330 | if (dev_priv->cp_mode == RADEON_CSQ_PRIDIS_INDDIS) { | ||
1331 | DRM_DEBUG("called with bogus CP mode (%d)\n", | ||
1332 | dev_priv->cp_mode); | ||
1333 | return 0; | ||
1334 | } | ||
1335 | |||
1336 | radeon_do_cp_start(dev_priv); | ||
1337 | |||
1338 | return 0; | ||
1339 | } | ||
1340 | |||
1341 | /* Stop the CP. The engine must have been idled before calling this | ||
1342 | * routine. | ||
1343 | */ | ||
1344 | int radeon_cp_stop(struct drm_device *dev, void *data, struct drm_file *file_priv) | ||
1345 | { | ||
1346 | drm_radeon_private_t *dev_priv = dev->dev_private; | ||
1347 | drm_radeon_cp_stop_t *stop = data; | ||
1348 | int ret; | ||
1349 | DRM_DEBUG("\n"); | ||
1350 | |||
1351 | LOCK_TEST_WITH_RETURN(dev, file_priv); | ||
1352 | |||
1353 | if (!dev_priv->cp_running) | ||
1354 | return 0; | ||
1355 | |||
1356 | /* Flush any pending CP commands. This ensures any outstanding | ||
1357 | * commands are executed by the engine before we turn it off. | ||
1358 | */ | ||
1359 | if (stop->flush) { | ||
1360 | radeon_do_cp_flush(dev_priv); | ||
1361 | } | ||
1362 | |||
1363 | /* If we fail to make the engine go idle, we return an error | ||
1364 | * code so that the DRM ioctl wrapper can try again. | ||
1365 | */ | ||
1366 | if (stop->idle) { | ||
1367 | ret = radeon_do_cp_idle(dev_priv); | ||
1368 | if (ret) | ||
1369 | return ret; | ||
1370 | } | ||
1371 | |||
1372 | /* Finally, we can turn off the CP. If the engine isn't idle, | ||
1373 | * we will get some dropped triangles as they won't be fully | ||
1374 | * rendered before the CP is shut down. | ||
1375 | */ | ||
1376 | radeon_do_cp_stop(dev_priv); | ||
1377 | |||
1378 | /* Reset the engine */ | ||
1379 | radeon_do_engine_reset(dev); | ||
1380 | |||
1381 | return 0; | ||
1382 | } | ||
1383 | |||
1384 | void radeon_do_release(struct drm_device * dev) | ||
1385 | { | ||
1386 | drm_radeon_private_t *dev_priv = dev->dev_private; | ||
1387 | int i, ret; | ||
1388 | |||
1389 | if (dev_priv) { | ||
1390 | if (dev_priv->cp_running) { | ||
1391 | /* Stop the cp */ | ||
1392 | while ((ret = radeon_do_cp_idle(dev_priv)) != 0) { | ||
1393 | DRM_DEBUG("radeon_do_cp_idle %d\n", ret); | ||
1394 | #ifdef __linux__ | ||
1395 | schedule(); | ||
1396 | #else | ||
1397 | tsleep(&ret, PZERO, "rdnrel", 1); | ||
1398 | #endif | ||
1399 | } | ||
1400 | radeon_do_cp_stop(dev_priv); | ||
1401 | radeon_do_engine_reset(dev); | ||
1402 | } | ||
1403 | |||
1404 | /* Disable *all* interrupts */ | ||
1405 | if (dev_priv->mmio) /* remove this after permanent addmaps */ | ||
1406 | RADEON_WRITE(RADEON_GEN_INT_CNTL, 0); | ||
1407 | |||
1408 | if (dev_priv->mmio) { /* remove all surfaces */ | ||
1409 | for (i = 0; i < RADEON_MAX_SURFACES; i++) { | ||
1410 | RADEON_WRITE(RADEON_SURFACE0_INFO + 16 * i, 0); | ||
1411 | RADEON_WRITE(RADEON_SURFACE0_LOWER_BOUND + | ||
1412 | 16 * i, 0); | ||
1413 | RADEON_WRITE(RADEON_SURFACE0_UPPER_BOUND + | ||
1414 | 16 * i, 0); | ||
1415 | } | ||
1416 | } | ||
1417 | |||
1418 | /* Free memory heap structures */ | ||
1419 | radeon_mem_takedown(&(dev_priv->gart_heap)); | ||
1420 | radeon_mem_takedown(&(dev_priv->fb_heap)); | ||
1421 | |||
1422 | /* deallocate kernel resources */ | ||
1423 | radeon_do_cleanup_cp(dev); | ||
1424 | } | ||
1425 | } | ||
1426 | |||
1427 | /* Just reset the CP ring. Called as part of an X Server engine reset. | ||
1428 | */ | ||
1429 | int radeon_cp_reset(struct drm_device *dev, void *data, struct drm_file *file_priv) | ||
1430 | { | ||
1431 | drm_radeon_private_t *dev_priv = dev->dev_private; | ||
1432 | DRM_DEBUG("\n"); | ||
1433 | |||
1434 | LOCK_TEST_WITH_RETURN(dev, file_priv); | ||
1435 | |||
1436 | if (!dev_priv) { | ||
1437 | DRM_DEBUG("called before init done\n"); | ||
1438 | return -EINVAL; | ||
1439 | } | ||
1440 | |||
1441 | radeon_do_cp_reset(dev_priv); | ||
1442 | |||
1443 | /* The CP is no longer running after an engine reset */ | ||
1444 | dev_priv->cp_running = 0; | ||
1445 | |||
1446 | return 0; | ||
1447 | } | ||
1448 | |||
1449 | int radeon_cp_idle(struct drm_device *dev, void *data, struct drm_file *file_priv) | ||
1450 | { | ||
1451 | drm_radeon_private_t *dev_priv = dev->dev_private; | ||
1452 | DRM_DEBUG("\n"); | ||
1453 | |||
1454 | LOCK_TEST_WITH_RETURN(dev, file_priv); | ||
1455 | |||
1456 | return radeon_do_cp_idle(dev_priv); | ||
1457 | } | ||
1458 | |||
1459 | /* Added by Charl P. Botha to call radeon_do_resume_cp(). | ||
1460 | */ | ||
1461 | int radeon_cp_resume(struct drm_device *dev, void *data, struct drm_file *file_priv) | ||
1462 | { | ||
1463 | |||
1464 | return radeon_do_resume_cp(dev); | ||
1465 | } | ||
1466 | |||
1467 | int radeon_engine_reset(struct drm_device *dev, void *data, struct drm_file *file_priv) | ||
1468 | { | ||
1469 | DRM_DEBUG("\n"); | ||
1470 | |||
1471 | LOCK_TEST_WITH_RETURN(dev, file_priv); | ||
1472 | |||
1473 | return radeon_do_engine_reset(dev); | ||
1474 | } | ||
1475 | |||
1476 | /* ================================================================ | ||
1477 | * Fullscreen mode | ||
1478 | */ | ||
1479 | |||
1480 | /* KW: Deprecated to say the least: | ||
1481 | */ | ||
1482 | int radeon_fullscreen(struct drm_device *dev, void *data, struct drm_file *file_priv) | ||
1483 | { | ||
1484 | return 0; | ||
1485 | } | ||
1486 | |||
1487 | /* ================================================================ | ||
1488 | * Freelist management | ||
1489 | */ | ||
1490 | |||
1491 | /* Original comment: FIXME: ROTATE_BUFS is a hack to cycle through | ||
1492 | * bufs until freelist code is used. Note this hides a problem with | ||
1493 | * the scratch register (used to keep track of the last buffer | ||
1494 | * completed) being written to before the last buffer has actually | ||
1495 | * completed rendering. | ||
1496 | * | ||
1497 | * KW: It's also a good way to find free buffers quickly. | ||
1498 | * | ||
1499 | * KW: Ideally this loop wouldn't exist, and freelist_get wouldn't | ||
1500 | * sleep. However, bugs in older versions of radeon_accel.c mean that | ||
1501 | * we essentially have to do this, else old clients will break. | ||
1502 | * | ||
1503 | * However, it does leave open a potential deadlock where all the | ||
1504 | * buffers are held by other clients, which can't release them because | ||
1505 | * they can't get the lock. | ||
1506 | */ | ||
1507 | |||
1508 | struct drm_buf *radeon_freelist_get(struct drm_device * dev) | ||
1509 | { | ||
1510 | struct drm_device_dma *dma = dev->dma; | ||
1511 | drm_radeon_private_t *dev_priv = dev->dev_private; | ||
1512 | drm_radeon_buf_priv_t *buf_priv; | ||
1513 | struct drm_buf *buf; | ||
1514 | int i, t; | ||
1515 | int start; | ||
1516 | |||
1517 | if (++dev_priv->last_buf >= dma->buf_count) | ||
1518 | dev_priv->last_buf = 0; | ||
1519 | |||
1520 | start = dev_priv->last_buf; | ||
1521 | |||
1522 | for (t = 0; t < dev_priv->usec_timeout; t++) { | ||
1523 | u32 done_age = GET_SCRATCH(1); | ||
1524 | DRM_DEBUG("done_age = %d\n", done_age); | ||
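/* The CP writes its dispatch "age" to scratch register 1 as each
 * buffer completes, so any pending buffer whose recorded age is
 * <= done_age has been consumed and is safe to hand out again. */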
1525 | for (i = start; i < dma->buf_count; i++) { | ||
1526 | buf = dma->buflist[i]; | ||
1527 | buf_priv = buf->dev_private; | ||
1528 | if (buf->file_priv == NULL || (buf->pending && | ||
1529 | buf_priv->age <= | ||
1530 | done_age)) { | ||
1531 | dev_priv->stats.requested_bufs++; | ||
1532 | buf->pending = 0; | ||
1533 | return buf; | ||
1534 | } | ||
1535 | start = 0; | ||
1536 | } | ||
1537 | |||
1538 | if (t) { | ||
1539 | DRM_UDELAY(1); | ||
1540 | dev_priv->stats.freelist_loops++; | ||
1541 | } | ||
1542 | } | ||
1543 | |||
1544 | DRM_DEBUG("returning NULL!\n"); | ||
1545 | return NULL; | ||
1546 | } | ||
1547 | |||
1548 | #if 0 | ||
1549 | struct drm_buf *radeon_freelist_get(struct drm_device * dev) | ||
1550 | { | ||
1551 | struct drm_device_dma *dma = dev->dma; | ||
1552 | drm_radeon_private_t *dev_priv = dev->dev_private; | ||
1553 | drm_radeon_buf_priv_t *buf_priv; | ||
1554 | struct drm_buf *buf; | ||
1555 | int i, t; | ||
1556 | int start; | ||
1557 | u32 done_age = DRM_READ32(dev_priv->ring_rptr, RADEON_SCRATCHOFF(1)); | ||
1558 | |||
1559 | if (++dev_priv->last_buf >= dma->buf_count) | ||
1560 | dev_priv->last_buf = 0; | ||
1561 | |||
1562 | start = dev_priv->last_buf; | ||
1563 | dev_priv->stats.freelist_loops++; | ||
1564 | |||
1565 | for (t = 0; t < 2; t++) { | ||
1566 | for (i = start; i < dma->buf_count; i++) { | ||
1567 | buf = dma->buflist[i]; | ||
1568 | buf_priv = buf->dev_private; | ||
1569 | if (buf->file_priv == NULL || (buf->pending && | ||
1570 | buf_priv->age <= | ||
1571 | done_age)) { | ||
1572 | dev_priv->stats.requested_bufs++; | ||
1573 | buf->pending = 0; | ||
1574 | return buf; | ||
1575 | } | ||
1576 | } | ||
1577 | start = 0; | ||
1578 | } | ||
1579 | |||
1580 | return NULL; | ||
1581 | } | ||
1582 | #endif | ||
1583 | |||
1584 | void radeon_freelist_reset(struct drm_device * dev) | ||
1585 | { | ||
1586 | struct drm_device_dma *dma = dev->dma; | ||
1587 | drm_radeon_private_t *dev_priv = dev->dev_private; | ||
1588 | int i; | ||
1589 | |||
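/* Zero every buffer's age so the next freelist_get() treats all
 * pending buffers as completed and starts handing them out from
 * index 0 again. */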
1590 | dev_priv->last_buf = 0; | ||
1591 | for (i = 0; i < dma->buf_count; i++) { | ||
1592 | struct drm_buf *buf = dma->buflist[i]; | ||
1593 | drm_radeon_buf_priv_t *buf_priv = buf->dev_private; | ||
1594 | buf_priv->age = 0; | ||
1595 | } | ||
1596 | } | ||
1597 | |||
1598 | /* ================================================================ | ||
1599 | * CP command submission | ||
1600 | */ | ||
1601 | |||
1602 | int radeon_wait_ring(drm_radeon_private_t * dev_priv, int n) | ||
1603 | { | ||
1604 | drm_radeon_ring_buffer_t *ring = &dev_priv->ring; | ||
1605 | int i; | ||
1606 | u32 last_head = GET_RING_HEAD(dev_priv); | ||
1607 | |||
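/* Poll the ring read pointer until more than n bytes are free.
 * Free space is (head - tail) in dwords scaled to bytes and wrapped
 * by the ring size; the timeout counter restarts whenever head
 * advances, so only a genuinely stalled CP times out. */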
1608 | for (i = 0; i < dev_priv->usec_timeout; i++) { | ||
1609 | u32 head = GET_RING_HEAD(dev_priv); | ||
1610 | |||
1611 | ring->space = (head - ring->tail) * sizeof(u32); | ||
1612 | if (ring->space <= 0) | ||
1613 | ring->space += ring->size; | ||
1614 | if (ring->space > n) | ||
1615 | return 0; | ||
1616 | |||
1617 | dev_priv->stats.boxes |= RADEON_BOX_WAIT_IDLE; | ||
1618 | |||
1619 | if (head != last_head) | ||
1620 | i = 0; | ||
1621 | last_head = head; | ||
1622 | |||
1623 | DRM_UDELAY(1); | ||
1624 | } | ||
1625 | |||
1626 | /* FIXME: This return value is ignored in the BEGIN_RING macro! */ | ||
1627 | #if RADEON_FIFO_DEBUG | ||
1628 | radeon_status(dev_priv); | ||
1629 | DRM_ERROR("failed!\n"); | ||
1630 | #endif | ||
1631 | return -EBUSY; | ||
1632 | } | ||
1633 | |||
1634 | static int radeon_cp_get_buffers(struct drm_device *dev, | ||
1635 | struct drm_file *file_priv, | ||
1636 | struct drm_dma * d) | ||
1637 | { | ||
1638 | int i; | ||
1639 | struct drm_buf *buf; | ||
1640 | |||
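/* Hand out one DMA buffer per outstanding request, copying each
 * buffer's index and size back to the caller's arrays; a NULL from
 * the freelist means no buffer could be reclaimed in time. */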
1641 | for (i = d->granted_count; i < d->request_count; i++) { | ||
1642 | buf = radeon_freelist_get(dev); | ||
1643 | if (!buf) | ||
1644 | return -EBUSY; /* NOTE: broken client */ | ||
1645 | |||
1646 | buf->file_priv = file_priv; | ||
1647 | |||
1648 | if (DRM_COPY_TO_USER(&d->request_indices[i], &buf->idx, | ||
1649 | sizeof(buf->idx))) | ||
1650 | return -EFAULT; | ||
1651 | if (DRM_COPY_TO_USER(&d->request_sizes[i], &buf->total, | ||
1652 | sizeof(buf->total))) | ||
1653 | return -EFAULT; | ||
1654 | |||
1655 | d->granted_count++; | ||
1656 | } | ||
1657 | return 0; | ||
1658 | } | ||
1659 | |||
1660 | int radeon_cp_buffers(struct drm_device *dev, void *data, struct drm_file *file_priv) | ||
1661 | { | ||
1662 | struct drm_device_dma *dma = dev->dma; | ||
1663 | int ret = 0; | ||
1664 | struct drm_dma *d = data; | ||
1665 | |||
1666 | LOCK_TEST_WITH_RETURN(dev, file_priv); | ||
1667 | |||
1668 | /* Please don't send us buffers. | ||
1669 | */ | ||
1670 | if (d->send_count != 0) { | ||
1671 | DRM_ERROR("Process %d trying to send %d buffers via drmDMA\n", | ||
1672 | DRM_CURRENTPID, d->send_count); | ||
1673 | return -EINVAL; | ||
1674 | } | ||
1675 | |||
1676 | /* We'll send you buffers. | ||
1677 | */ | ||
1678 | if (d->request_count < 0 || d->request_count > dma->buf_count) { | ||
1679 | DRM_ERROR("Process %d trying to get %d buffers (of %d max)\n", | ||
1680 | DRM_CURRENTPID, d->request_count, dma->buf_count); | ||
1681 | return -EINVAL; | ||
1682 | } | ||
1683 | |||
1684 | d->granted_count = 0; | ||
1685 | |||
1686 | if (d->request_count) { | ||
1687 | ret = radeon_cp_get_buffers(dev, file_priv, d); | ||
1688 | } | ||
1689 | |||
1690 | return ret; | ||
1691 | } | ||
1692 | |||
1693 | int radeon_driver_load(struct drm_device *dev, unsigned long flags) | ||
1694 | { | ||
1695 | drm_radeon_private_t *dev_priv; | ||
1696 | int ret = 0; | ||
1697 | |||
1698 | dev_priv = drm_alloc(sizeof(drm_radeon_private_t), DRM_MEM_DRIVER); | ||
1699 | if (dev_priv == NULL) | ||
1700 | return -ENOMEM; | ||
1701 | |||
1702 | memset(dev_priv, 0, sizeof(drm_radeon_private_t)); | ||
1703 | dev->dev_private = (void *)dev_priv; | ||
1704 | dev_priv->flags = flags; | ||
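/* The chip family lives in the bits covered by RADEON_FAMILY_MASK;
 * feature and bus-type bits (RADEON_HAS_HIERZ, RADEON_IS_AGP/PCIE/PCI)
 * are OR'ed in below. */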
1705 | |||
1706 | switch (flags & RADEON_FAMILY_MASK) { | ||
1707 | case CHIP_R100: | ||
1708 | case CHIP_RV200: | ||
1709 | case CHIP_R200: | ||
1710 | case CHIP_R300: | ||
1711 | case CHIP_R350: | ||
1712 | case CHIP_R420: | ||
1713 | case CHIP_RV410: | ||
1714 | case CHIP_RV515: | ||
1715 | case CHIP_R520: | ||
1716 | case CHIP_RV570: | ||
1717 | case CHIP_R580: | ||
1718 | dev_priv->flags |= RADEON_HAS_HIERZ; | ||
1719 | break; | ||
1720 | default: | ||
1721 | /* all other chips have no hierarchical z buffer */ | ||
1722 | break; | ||
1723 | } | ||
1724 | |||
1725 | if (drm_device_is_agp(dev)) | ||
1726 | dev_priv->flags |= RADEON_IS_AGP; | ||
1727 | else if (drm_device_is_pcie(dev)) | ||
1728 | dev_priv->flags |= RADEON_IS_PCIE; | ||
1729 | else | ||
1730 | dev_priv->flags |= RADEON_IS_PCI; | ||
1731 | |||
1732 | DRM_DEBUG("%s card detected\n", | ||
1733 | ((dev_priv->flags & RADEON_IS_AGP) ? "AGP" : (((dev_priv->flags & RADEON_IS_PCIE) ? "PCIE" : "PCI")))); | ||
1734 | return ret; | ||
1735 | } | ||
1736 | |||
1737 | /* Create mappings for registers and framebuffer so userland doesn't necessarily | ||
1738 | * have to find them. | ||
1739 | */ | ||
1740 | int radeon_driver_firstopen(struct drm_device *dev) | ||
1741 | { | ||
1742 | int ret; | ||
1743 | drm_local_map_t *map; | ||
1744 | drm_radeon_private_t *dev_priv = dev->dev_private; | ||
1745 | |||
1746 | dev_priv->gart_info.table_size = RADEON_PCIGART_TABLE_SIZE; | ||
1747 | |||
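/* PCI resource 2 is the MMIO register BAR (mapped read-only);
 * resource 0 is the framebuffer aperture (mapped write-combining). */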
1748 | ret = drm_addmap(dev, drm_get_resource_start(dev, 2), | ||
1749 | drm_get_resource_len(dev, 2), _DRM_REGISTERS, | ||
1750 | _DRM_READ_ONLY, &dev_priv->mmio); | ||
1751 | if (ret != 0) | ||
1752 | return ret; | ||
1753 | |||
1754 | dev_priv->fb_aper_offset = drm_get_resource_start(dev, 0); | ||
1755 | ret = drm_addmap(dev, dev_priv->fb_aper_offset, | ||
1756 | drm_get_resource_len(dev, 0), _DRM_FRAME_BUFFER, | ||
1757 | _DRM_WRITE_COMBINING, &map); | ||
1758 | if (ret != 0) | ||
1759 | return ret; | ||
1760 | |||
1761 | return 0; | ||
1762 | } | ||
1763 | |||
1764 | int radeon_driver_unload(struct drm_device *dev) | ||
1765 | { | ||
1766 | drm_radeon_private_t *dev_priv = dev->dev_private; | ||
1767 | |||
1768 | DRM_DEBUG("\n"); | ||
1769 | drm_free(dev_priv, sizeof(*dev_priv), DRM_MEM_DRIVER); | ||
1770 | |||
1771 | dev->dev_private = NULL; | ||
1772 | return 0; | ||
1773 | } | ||