Diffstat (limited to 'drivers/gpu/drm/radeon/radeon_cp.c')
-rw-r--r--  drivers/gpu/drm/radeon/radeon_cp.c | 2243
1 file changed, 0 insertions, 2243 deletions
diff --git a/drivers/gpu/drm/radeon/radeon_cp.c b/drivers/gpu/drm/radeon/radeon_cp.c
deleted file mode 100644
index 500287eff55d..000000000000
--- a/drivers/gpu/drm/radeon/radeon_cp.c
+++ /dev/null
@@ -1,2243 +0,0 @@
1 | /* radeon_cp.c -- CP support for Radeon -*- linux-c -*- */ | ||
2 | /* | ||
3 | * Copyright 2000 Precision Insight, Inc., Cedar Park, Texas. | ||
4 | * Copyright 2000 VA Linux Systems, Inc., Fremont, California. | ||
5 | * Copyright 2007 Advanced Micro Devices, Inc. | ||
6 | * All Rights Reserved. | ||
7 | * | ||
8 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
9 | * copy of this software and associated documentation files (the "Software"), | ||
10 | * to deal in the Software without restriction, including without limitation | ||
11 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
12 | * and/or sell copies of the Software, and to permit persons to whom the | ||
13 | * Software is furnished to do so, subject to the following conditions: | ||
14 | * | ||
15 | * The above copyright notice and this permission notice (including the next | ||
16 | * paragraph) shall be included in all copies or substantial portions of the | ||
17 | * Software. | ||
18 | * | ||
19 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
20 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
21 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
22 | * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR | ||
23 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | ||
24 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER | ||
25 | * DEALINGS IN THE SOFTWARE. | ||
26 | * | ||
27 | * Authors: | ||
28 | * Kevin E. Martin <martin@valinux.com> | ||
29 | * Gareth Hughes <gareth@valinux.com> | ||
30 | * | ||
31 | * ------------------------ This file is DEPRECATED! ------------------------- | ||
32 | */ | ||
33 | |||
34 | #include <linux/module.h> | ||
35 | |||
36 | #include <drm/drmP.h> | ||
37 | #include <drm/radeon_drm.h> | ||
38 | #include "radeon_drv.h" | ||
39 | #include "r300_reg.h" | ||
40 | |||
41 | #define RADEON_FIFO_DEBUG 0 | ||
42 | |||
43 | /* Firmware Names */ | ||
44 | #define FIRMWARE_R100 "radeon/R100_cp.bin" | ||
45 | #define FIRMWARE_R200 "radeon/R200_cp.bin" | ||
46 | #define FIRMWARE_R300 "radeon/R300_cp.bin" | ||
47 | #define FIRMWARE_R420 "radeon/R420_cp.bin" | ||
48 | #define FIRMWARE_RS690 "radeon/RS690_cp.bin" | ||
49 | #define FIRMWARE_RS600 "radeon/RS600_cp.bin" | ||
50 | #define FIRMWARE_R520 "radeon/R520_cp.bin" | ||
51 | |||
52 | MODULE_FIRMWARE(FIRMWARE_R100); | ||
53 | MODULE_FIRMWARE(FIRMWARE_R200); | ||
54 | MODULE_FIRMWARE(FIRMWARE_R300); | ||
55 | MODULE_FIRMWARE(FIRMWARE_R420); | ||
56 | MODULE_FIRMWARE(FIRMWARE_RS690); | ||
57 | MODULE_FIRMWARE(FIRMWARE_RS600); | ||
58 | MODULE_FIRMWARE(FIRMWARE_R520); | ||
59 | |||
60 | static int radeon_do_cleanup_cp(struct drm_device * dev); | ||
61 | static void radeon_do_cp_start(drm_radeon_private_t * dev_priv); | ||
62 | |||
63 | u32 radeon_read_ring_rptr(drm_radeon_private_t *dev_priv, u32 off) | ||
64 | { | ||
65 | u32 val; | ||
66 | |||
67 | if (dev_priv->flags & RADEON_IS_AGP) { | ||
68 | val = DRM_READ32(dev_priv->ring_rptr, off); | ||
69 | } else { | ||
70 | val = *(((volatile u32 *) | ||
71 | dev_priv->ring_rptr->handle) + | ||
72 | (off / sizeof(u32))); | ||
73 | val = le32_to_cpu(val); | ||
74 | } | ||
75 | return val; | ||
76 | } | ||
77 | |||
78 | u32 radeon_get_ring_head(drm_radeon_private_t *dev_priv) | ||
79 | { | ||
80 | if (dev_priv->writeback_works) | ||
81 | return radeon_read_ring_rptr(dev_priv, 0); | ||
82 | else { | ||
83 | if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R600) | ||
84 | return RADEON_READ(R600_CP_RB_RPTR); | ||
85 | else | ||
86 | return RADEON_READ(RADEON_CP_RB_RPTR); | ||
87 | } | ||
88 | } | ||
89 | |||
90 | void radeon_write_ring_rptr(drm_radeon_private_t *dev_priv, u32 off, u32 val) | ||
91 | { | ||
92 | if (dev_priv->flags & RADEON_IS_AGP) | ||
93 | DRM_WRITE32(dev_priv->ring_rptr, off, val); | ||
94 | else | ||
95 | *(((volatile u32 *) dev_priv->ring_rptr->handle) + | ||
96 | (off / sizeof(u32))) = cpu_to_le32(val); | ||
97 | } | ||
98 | |||
99 | void radeon_set_ring_head(drm_radeon_private_t *dev_priv, u32 val) | ||
100 | { | ||
101 | radeon_write_ring_rptr(dev_priv, 0, val); | ||
102 | } | ||
103 | |||
104 | u32 radeon_get_scratch(drm_radeon_private_t *dev_priv, int index) | ||
105 | { | ||
106 | if (dev_priv->writeback_works) { | ||
107 | if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R600) | ||
108 | return radeon_read_ring_rptr(dev_priv, | ||
109 | R600_SCRATCHOFF(index)); | ||
110 | else | ||
111 | return radeon_read_ring_rptr(dev_priv, | ||
112 | RADEON_SCRATCHOFF(index)); | ||
113 | } else { | ||
114 | if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R600) | ||
115 | return RADEON_READ(R600_SCRATCH_REG0 + 4*index); | ||
116 | else | ||
117 | return RADEON_READ(RADEON_SCRATCH_REG0 + 4*index); | ||
118 | } | ||
119 | } | ||
120 | |||
121 | static u32 R500_READ_MCIND(drm_radeon_private_t *dev_priv, int addr) | ||
122 | { | ||
123 | u32 ret; | ||
124 | RADEON_WRITE(R520_MC_IND_INDEX, 0x7f0000 | (addr & 0xff)); | ||
125 | ret = RADEON_READ(R520_MC_IND_DATA); | ||
126 | RADEON_WRITE(R520_MC_IND_INDEX, 0); | ||
127 | return ret; | ||
128 | } | ||
129 | |||
130 | static u32 RS480_READ_MCIND(drm_radeon_private_t *dev_priv, int addr) | ||
131 | { | ||
132 | u32 ret; | ||
133 | RADEON_WRITE(RS480_NB_MC_INDEX, addr & 0xff); | ||
134 | ret = RADEON_READ(RS480_NB_MC_DATA); | ||
135 | RADEON_WRITE(RS480_NB_MC_INDEX, 0xff); | ||
136 | return ret; | ||
137 | } | ||
138 | |||
139 | static u32 RS690_READ_MCIND(drm_radeon_private_t *dev_priv, int addr) | ||
140 | { | ||
141 | u32 ret; | ||
142 | RADEON_WRITE(RS690_MC_INDEX, (addr & RS690_MC_INDEX_MASK)); | ||
143 | ret = RADEON_READ(RS690_MC_DATA); | ||
144 | RADEON_WRITE(RS690_MC_INDEX, RS690_MC_INDEX_MASK); | ||
145 | return ret; | ||
146 | } | ||
147 | |||
148 | static u32 RS600_READ_MCIND(drm_radeon_private_t *dev_priv, int addr) | ||
149 | { | ||
150 | u32 ret; | ||
151 | RADEON_WRITE(RS600_MC_INDEX, ((addr & RS600_MC_ADDR_MASK) | | ||
152 | RS600_MC_IND_CITF_ARB0)); | ||
153 | ret = RADEON_READ(RS600_MC_DATA); | ||
154 | return ret; | ||
155 | } | ||
156 | |||
157 | static u32 IGP_READ_MCIND(drm_radeon_private_t *dev_priv, int addr) | ||
158 | { | ||
159 | if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS690) || | ||
160 | ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS740)) | ||
161 | return RS690_READ_MCIND(dev_priv, addr); | ||
162 | else if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS600) | ||
163 | return RS600_READ_MCIND(dev_priv, addr); | ||
164 | else | ||
165 | return RS480_READ_MCIND(dev_priv, addr); | ||
166 | } | ||
167 | |||
168 | u32 radeon_read_fb_location(drm_radeon_private_t *dev_priv) | ||
169 | { | ||
170 | |||
171 | if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RV770) | ||
172 | return RADEON_READ(R700_MC_VM_FB_LOCATION); | ||
173 | else if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R600) | ||
174 | return RADEON_READ(R600_MC_VM_FB_LOCATION); | ||
175 | else if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV515) | ||
176 | return R500_READ_MCIND(dev_priv, RV515_MC_FB_LOCATION); | ||
177 | else if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS690) || | ||
178 | ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS740)) | ||
179 | return RS690_READ_MCIND(dev_priv, RS690_MC_FB_LOCATION); | ||
180 | else if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS600) | ||
181 | return RS600_READ_MCIND(dev_priv, RS600_MC_FB_LOCATION); | ||
182 | else if ((dev_priv->flags & RADEON_FAMILY_MASK) > CHIP_RV515) | ||
183 | return R500_READ_MCIND(dev_priv, R520_MC_FB_LOCATION); | ||
184 | else | ||
185 | return RADEON_READ(RADEON_MC_FB_LOCATION); | ||
186 | } | ||
187 | |||
188 | static void radeon_write_fb_location(drm_radeon_private_t *dev_priv, u32 fb_loc) | ||
189 | { | ||
190 | if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RV770) | ||
191 | RADEON_WRITE(R700_MC_VM_FB_LOCATION, fb_loc); | ||
192 | else if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R600) | ||
193 | RADEON_WRITE(R600_MC_VM_FB_LOCATION, fb_loc); | ||
194 | else if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV515) | ||
195 | R500_WRITE_MCIND(RV515_MC_FB_LOCATION, fb_loc); | ||
196 | else if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS690) || | ||
197 | ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS740)) | ||
198 | RS690_WRITE_MCIND(RS690_MC_FB_LOCATION, fb_loc); | ||
199 | else if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS600) | ||
200 | RS600_WRITE_MCIND(RS600_MC_FB_LOCATION, fb_loc); | ||
201 | else if ((dev_priv->flags & RADEON_FAMILY_MASK) > CHIP_RV515) | ||
202 | R500_WRITE_MCIND(R520_MC_FB_LOCATION, fb_loc); | ||
203 | else | ||
204 | RADEON_WRITE(RADEON_MC_FB_LOCATION, fb_loc); | ||
205 | } | ||
206 | |||
207 | void radeon_write_agp_location(drm_radeon_private_t *dev_priv, u32 agp_loc) | ||
208 | { | ||
209 | /*R6xx/R7xx: AGP_TOP and BOT are actually 18 bits each */ | ||
210 | if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RV770) { | ||
211 | RADEON_WRITE(R700_MC_VM_AGP_BOT, agp_loc & 0xffff); /* FIX ME */ | ||
212 | RADEON_WRITE(R700_MC_VM_AGP_TOP, (agp_loc >> 16) & 0xffff); | ||
213 | } else if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R600) { | ||
214 | RADEON_WRITE(R600_MC_VM_AGP_BOT, agp_loc & 0xffff); /* FIX ME */ | ||
215 | RADEON_WRITE(R600_MC_VM_AGP_TOP, (agp_loc >> 16) & 0xffff); | ||
216 | } else if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV515) | ||
217 | R500_WRITE_MCIND(RV515_MC_AGP_LOCATION, agp_loc); | ||
218 | else if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS690) || | ||
219 | ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS740)) | ||
220 | RS690_WRITE_MCIND(RS690_MC_AGP_LOCATION, agp_loc); | ||
221 | else if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS600) | ||
222 | RS600_WRITE_MCIND(RS600_MC_AGP_LOCATION, agp_loc); | ||
223 | else if ((dev_priv->flags & RADEON_FAMILY_MASK) > CHIP_RV515) | ||
224 | R500_WRITE_MCIND(R520_MC_AGP_LOCATION, agp_loc); | ||
225 | else | ||
226 | RADEON_WRITE(RADEON_MC_AGP_LOCATION, agp_loc); | ||
227 | } | ||
228 | |||
229 | void radeon_write_agp_base(drm_radeon_private_t *dev_priv, u64 agp_base) | ||
230 | { | ||
231 | u32 agp_base_hi = upper_32_bits(agp_base); | ||
232 | u32 agp_base_lo = agp_base & 0xffffffff; | ||
233 | u32 r6xx_agp_base = (agp_base >> 22) & 0x3ffff; | ||
234 | |||
235 | /* R6xx/R7xx must be aligned to a 4MB boundary */ | ||
236 | if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RV770) | ||
237 | RADEON_WRITE(R700_MC_VM_AGP_BASE, r6xx_agp_base); | ||
238 | else if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R600) | ||
239 | RADEON_WRITE(R600_MC_VM_AGP_BASE, r6xx_agp_base); | ||
240 | else if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV515) { | ||
241 | R500_WRITE_MCIND(RV515_MC_AGP_BASE, agp_base_lo); | ||
242 | R500_WRITE_MCIND(RV515_MC_AGP_BASE_2, agp_base_hi); | ||
243 | } else if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS690) || | ||
244 | ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS740)) { | ||
245 | RS690_WRITE_MCIND(RS690_MC_AGP_BASE, agp_base_lo); | ||
246 | RS690_WRITE_MCIND(RS690_MC_AGP_BASE_2, agp_base_hi); | ||
247 | } else if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS600) { | ||
248 | RS600_WRITE_MCIND(RS600_AGP_BASE, agp_base_lo); | ||
249 | RS600_WRITE_MCIND(RS600_AGP_BASE_2, agp_base_hi); | ||
250 | } else if ((dev_priv->flags & RADEON_FAMILY_MASK) > CHIP_RV515) { | ||
251 | R500_WRITE_MCIND(R520_MC_AGP_BASE, agp_base_lo); | ||
252 | R500_WRITE_MCIND(R520_MC_AGP_BASE_2, agp_base_hi); | ||
253 | } else if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS400) || | ||
254 | ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS480)) { | ||
255 | RADEON_WRITE(RADEON_AGP_BASE, agp_base_lo); | ||
256 | RADEON_WRITE(RS480_AGP_BASE_2, agp_base_hi); | ||
257 | } else { | ||
258 | RADEON_WRITE(RADEON_AGP_BASE, agp_base_lo); | ||
259 | if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R200) | ||
260 | RADEON_WRITE(RADEON_AGP_BASE_2, agp_base_hi); | ||
261 | } | ||
262 | } | ||
263 | |||
264 | void radeon_enable_bm(struct drm_radeon_private *dev_priv) | ||
265 | { | ||
266 | u32 tmp; | ||
267 | /* Turn on bus mastering */ | ||
268 | if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS690) || | ||
269 | ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS740)) { | ||
270 | /* rs600/rs690/rs740 */ | ||
271 | tmp = RADEON_READ(RADEON_BUS_CNTL) & ~RS600_BUS_MASTER_DIS; | ||
272 | RADEON_WRITE(RADEON_BUS_CNTL, tmp); | ||
273 | } else if (((dev_priv->flags & RADEON_FAMILY_MASK) <= CHIP_RV350) || | ||
274 | ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_R420) || | ||
275 | ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS400) || | ||
276 | ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS480)) { | ||
277 | /* r1xx, r2xx, r300, r(v)350, r420/r481, rs400/rs480 */ | ||
278 | tmp = RADEON_READ(RADEON_BUS_CNTL) & ~RADEON_BUS_MASTER_DIS; | ||
279 | RADEON_WRITE(RADEON_BUS_CNTL, tmp); | ||
280 | } /* PCIE cards appears to not need this */ | ||
281 | } | ||
282 | |||
283 | static int RADEON_READ_PLL(struct drm_device * dev, int addr) | ||
284 | { | ||
285 | drm_radeon_private_t *dev_priv = dev->dev_private; | ||
286 | |||
287 | RADEON_WRITE8(RADEON_CLOCK_CNTL_INDEX, addr & 0x1f); | ||
288 | return RADEON_READ(RADEON_CLOCK_CNTL_DATA); | ||
289 | } | ||
290 | |||
291 | static u32 RADEON_READ_PCIE(drm_radeon_private_t *dev_priv, int addr) | ||
292 | { | ||
293 | RADEON_WRITE8(RADEON_PCIE_INDEX, addr & 0xff); | ||
294 | return RADEON_READ(RADEON_PCIE_DATA); | ||
295 | } | ||
296 | |||
297 | #if RADEON_FIFO_DEBUG | ||
298 | static void radeon_status(drm_radeon_private_t * dev_priv) | ||
299 | { | ||
300 | printk("%s:\n", __func__); | ||
301 | printk("RBBM_STATUS = 0x%08x\n", | ||
302 | (unsigned int)RADEON_READ(RADEON_RBBM_STATUS)); | ||
303 | printk("CP_RB_RTPR = 0x%08x\n", | ||
304 | (unsigned int)RADEON_READ(RADEON_CP_RB_RPTR)); | ||
305 | printk("CP_RB_WTPR = 0x%08x\n", | ||
306 | (unsigned int)RADEON_READ(RADEON_CP_RB_WPTR)); | ||
307 | printk("AIC_CNTL = 0x%08x\n", | ||
308 | (unsigned int)RADEON_READ(RADEON_AIC_CNTL)); | ||
309 | printk("AIC_STAT = 0x%08x\n", | ||
310 | (unsigned int)RADEON_READ(RADEON_AIC_STAT)); | ||
311 | printk("AIC_PT_BASE = 0x%08x\n", | ||
312 | (unsigned int)RADEON_READ(RADEON_AIC_PT_BASE)); | ||
313 | printk("TLB_ADDR = 0x%08x\n", | ||
314 | (unsigned int)RADEON_READ(RADEON_AIC_TLB_ADDR)); | ||
315 | printk("TLB_DATA = 0x%08x\n", | ||
316 | (unsigned int)RADEON_READ(RADEON_AIC_TLB_DATA)); | ||
317 | } | ||
318 | #endif | ||
319 | |||
320 | /* ================================================================ | ||
321 | * Engine, FIFO control | ||
322 | */ | ||
323 | |||
324 | static int radeon_do_pixcache_flush(drm_radeon_private_t * dev_priv) | ||
325 | { | ||
326 | u32 tmp; | ||
327 | int i; | ||
328 | |||
329 | dev_priv->stats.boxes |= RADEON_BOX_WAIT_IDLE; | ||
330 | |||
331 | if ((dev_priv->flags & RADEON_FAMILY_MASK) <= CHIP_RV280) { | ||
332 | tmp = RADEON_READ(RADEON_RB3D_DSTCACHE_CTLSTAT); | ||
333 | tmp |= RADEON_RB3D_DC_FLUSH_ALL; | ||
334 | RADEON_WRITE(RADEON_RB3D_DSTCACHE_CTLSTAT, tmp); | ||
335 | |||
336 | for (i = 0; i < dev_priv->usec_timeout; i++) { | ||
337 | if (!(RADEON_READ(RADEON_RB3D_DSTCACHE_CTLSTAT) | ||
338 | & RADEON_RB3D_DC_BUSY)) { | ||
339 | return 0; | ||
340 | } | ||
341 | DRM_UDELAY(1); | ||
342 | } | ||
343 | } else { | ||
344 | /* don't flush or purge cache here or lockup */ | ||
345 | return 0; | ||
346 | } | ||
347 | |||
348 | #if RADEON_FIFO_DEBUG | ||
349 | DRM_ERROR("failed!\n"); | ||
350 | radeon_status(dev_priv); | ||
351 | #endif | ||
352 | return -EBUSY; | ||
353 | } | ||
354 | |||
355 | static int radeon_do_wait_for_fifo(drm_radeon_private_t * dev_priv, int entries) | ||
356 | { | ||
357 | int i; | ||
358 | |||
359 | dev_priv->stats.boxes |= RADEON_BOX_WAIT_IDLE; | ||
360 | |||
361 | for (i = 0; i < dev_priv->usec_timeout; i++) { | ||
362 | int slots = (RADEON_READ(RADEON_RBBM_STATUS) | ||
363 | & RADEON_RBBM_FIFOCNT_MASK); | ||
364 | if (slots >= entries) | ||
365 | return 0; | ||
366 | DRM_UDELAY(1); | ||
367 | } | ||
368 | DRM_DEBUG("wait for fifo failed status : 0x%08X 0x%08X\n", | ||
369 | RADEON_READ(RADEON_RBBM_STATUS), | ||
370 | RADEON_READ(R300_VAP_CNTL_STATUS)); | ||
371 | |||
372 | #if RADEON_FIFO_DEBUG | ||
373 | DRM_ERROR("failed!\n"); | ||
374 | radeon_status(dev_priv); | ||
375 | #endif | ||
376 | return -EBUSY; | ||
377 | } | ||
378 | |||
379 | static int radeon_do_wait_for_idle(drm_radeon_private_t * dev_priv) | ||
380 | { | ||
381 | int i, ret; | ||
382 | |||
383 | dev_priv->stats.boxes |= RADEON_BOX_WAIT_IDLE; | ||
384 | |||
385 | ret = radeon_do_wait_for_fifo(dev_priv, 64); | ||
386 | if (ret) | ||
387 | return ret; | ||
388 | |||
389 | for (i = 0; i < dev_priv->usec_timeout; i++) { | ||
390 | if (!(RADEON_READ(RADEON_RBBM_STATUS) | ||
391 | & RADEON_RBBM_ACTIVE)) { | ||
392 | radeon_do_pixcache_flush(dev_priv); | ||
393 | return 0; | ||
394 | } | ||
395 | DRM_UDELAY(1); | ||
396 | } | ||
397 | DRM_DEBUG("wait idle failed status : 0x%08X 0x%08X\n", | ||
398 | RADEON_READ(RADEON_RBBM_STATUS), | ||
399 | RADEON_READ(R300_VAP_CNTL_STATUS)); | ||
400 | |||
401 | #if RADEON_FIFO_DEBUG | ||
402 | DRM_ERROR("failed!\n"); | ||
403 | radeon_status(dev_priv); | ||
404 | #endif | ||
405 | return -EBUSY; | ||
406 | } | ||
407 | |||
408 | static void radeon_init_pipes(struct drm_device *dev) | ||
409 | { | ||
410 | drm_radeon_private_t *dev_priv = dev->dev_private; | ||
411 | uint32_t gb_tile_config, gb_pipe_sel = 0; | ||
412 | |||
413 | if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV530) { | ||
414 | uint32_t z_pipe_sel = RADEON_READ(RV530_GB_PIPE_SELECT2); | ||
415 | if ((z_pipe_sel & 3) == 3) | ||
416 | dev_priv->num_z_pipes = 2; | ||
417 | else | ||
418 | dev_priv->num_z_pipes = 1; | ||
419 | } else | ||
420 | dev_priv->num_z_pipes = 1; | ||
421 | |||
422 | /* RS4xx/RS6xx/R4xx/R5xx */ | ||
423 | if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R420) { | ||
424 | gb_pipe_sel = RADEON_READ(R400_GB_PIPE_SELECT); | ||
425 | dev_priv->num_gb_pipes = ((gb_pipe_sel >> 12) & 0x3) + 1; | ||
426 | /* SE cards have 1 pipe */ | ||
427 | if ((dev->pdev->device == 0x5e4c) || | ||
428 | (dev->pdev->device == 0x5e4f)) | ||
429 | dev_priv->num_gb_pipes = 1; | ||
430 | } else { | ||
431 | /* R3xx */ | ||
432 | if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_R300 && | ||
433 | dev->pdev->device != 0x4144) || | ||
434 | ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_R350 && | ||
435 | dev->pdev->device != 0x4148)) { | ||
436 | dev_priv->num_gb_pipes = 2; | ||
437 | } else { | ||
438 | /* RV3xx/R300 AD/R350 AH */ | ||
439 | dev_priv->num_gb_pipes = 1; | ||
440 | } | ||
441 | } | ||
442 | DRM_INFO("Num pipes: %d\n", dev_priv->num_gb_pipes); | ||
443 | |||
444 | gb_tile_config = (R300_ENABLE_TILING | R300_TILE_SIZE_16 /*| R300_SUBPIXEL_1_16*/); | ||
445 | |||
446 | switch (dev_priv->num_gb_pipes) { | ||
447 | case 2: gb_tile_config |= R300_PIPE_COUNT_R300; break; | ||
448 | case 3: gb_tile_config |= R300_PIPE_COUNT_R420_3P; break; | ||
449 | case 4: gb_tile_config |= R300_PIPE_COUNT_R420; break; | ||
450 | default: | ||
451 | case 1: gb_tile_config |= R300_PIPE_COUNT_RV350; break; | ||
452 | } | ||
453 | |||
454 | if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RV515) { | ||
455 | RADEON_WRITE_PLL(R500_DYN_SCLK_PWMEM_PIPE, (1 | ((gb_pipe_sel >> 8) & 0xf) << 4)); | ||
456 | RADEON_WRITE(R300_SU_REG_DEST, ((1 << dev_priv->num_gb_pipes) - 1)); | ||
457 | } | ||
458 | RADEON_WRITE(R300_GB_TILE_CONFIG, gb_tile_config); | ||
459 | radeon_do_wait_for_idle(dev_priv); | ||
460 | RADEON_WRITE(R300_DST_PIPE_CONFIG, RADEON_READ(R300_DST_PIPE_CONFIG) | R300_PIPE_AUTO_CONFIG); | ||
461 | RADEON_WRITE(R300_RB2D_DSTCACHE_MODE, (RADEON_READ(R300_RB2D_DSTCACHE_MODE) | | ||
462 | R300_DC_AUTOFLUSH_ENABLE | | ||
463 | R300_DC_DC_DISABLE_IGNORE_PE)); | ||
464 | |||
465 | |||
466 | } | ||
467 | |||
468 | /* ================================================================ | ||
469 | * CP control, initialization | ||
470 | */ | ||
471 | |||
472 | /* Load the microcode for the CP */ | ||
473 | static int radeon_cp_init_microcode(drm_radeon_private_t *dev_priv) | ||
474 | { | ||
475 | struct platform_device *pdev; | ||
476 | const char *fw_name = NULL; | ||
477 | int err; | ||
478 | |||
479 | DRM_DEBUG("\n"); | ||
480 | |||
481 | pdev = platform_device_register_simple("radeon_cp", 0, NULL, 0); | ||
482 | err = IS_ERR(pdev); | ||
483 | if (err) { | ||
484 | printk(KERN_ERR "radeon_cp: Failed to register firmware\n"); | ||
485 | return -EINVAL; | ||
486 | } | ||
487 | |||
488 | if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_R100) || | ||
489 | ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV100) || | ||
490 | ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV200) || | ||
491 | ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS100) || | ||
492 | ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS200)) { | ||
493 | DRM_INFO("Loading R100 Microcode\n"); | ||
494 | fw_name = FIRMWARE_R100; | ||
495 | } else if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_R200) || | ||
496 | ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV250) || | ||
497 | ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV280) || | ||
498 | ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS300)) { | ||
499 | DRM_INFO("Loading R200 Microcode\n"); | ||
500 | fw_name = FIRMWARE_R200; | ||
501 | } else if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_R300) || | ||
502 | ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_R350) || | ||
503 | ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV350) || | ||
504 | ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV380) || | ||
505 | ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS400) || | ||
506 | ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS480)) { | ||
507 | DRM_INFO("Loading R300 Microcode\n"); | ||
508 | fw_name = FIRMWARE_R300; | ||
509 | } else if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_R420) || | ||
510 | ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_R423) || | ||
511 | ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV410)) { | ||
512 | DRM_INFO("Loading R400 Microcode\n"); | ||
513 | fw_name = FIRMWARE_R420; | ||
514 | } else if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS690) || | ||
515 | ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS740)) { | ||
516 | DRM_INFO("Loading RS690/RS740 Microcode\n"); | ||
517 | fw_name = FIRMWARE_RS690; | ||
518 | } else if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS600) { | ||
519 | DRM_INFO("Loading RS600 Microcode\n"); | ||
520 | fw_name = FIRMWARE_RS600; | ||
521 | } else if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV515) || | ||
522 | ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_R520) || | ||
523 | ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV530) || | ||
524 | ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_R580) || | ||
525 | ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV560) || | ||
526 | ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV570)) { | ||
527 | DRM_INFO("Loading R500 Microcode\n"); | ||
528 | fw_name = FIRMWARE_R520; | ||
529 | } | ||
530 | |||
531 | err = request_firmware(&dev_priv->me_fw, fw_name, &pdev->dev); | ||
532 | platform_device_unregister(pdev); | ||
533 | if (err) { | ||
534 | printk(KERN_ERR "radeon_cp: Failed to load firmware \"%s\"\n", | ||
535 | fw_name); | ||
536 | } else if (dev_priv->me_fw->size % 8) { | ||
537 | printk(KERN_ERR | ||
538 | "radeon_cp: Bogus length %zu in firmware \"%s\"\n", | ||
539 | dev_priv->me_fw->size, fw_name); | ||
540 | err = -EINVAL; | ||
541 | release_firmware(dev_priv->me_fw); | ||
542 | dev_priv->me_fw = NULL; | ||
543 | } | ||
544 | return err; | ||
545 | } | ||
546 | |||
547 | static void radeon_cp_load_microcode(drm_radeon_private_t *dev_priv) | ||
548 | { | ||
549 | const __be32 *fw_data; | ||
550 | int i, size; | ||
551 | |||
552 | radeon_do_wait_for_idle(dev_priv); | ||
553 | |||
554 | if (dev_priv->me_fw) { | ||
555 | size = dev_priv->me_fw->size / 4; | ||
556 | fw_data = (const __be32 *)&dev_priv->me_fw->data[0]; | ||
557 | RADEON_WRITE(RADEON_CP_ME_RAM_ADDR, 0); | ||
558 | for (i = 0; i < size; i += 2) { | ||
559 | RADEON_WRITE(RADEON_CP_ME_RAM_DATAH, | ||
560 | be32_to_cpup(&fw_data[i])); | ||
561 | RADEON_WRITE(RADEON_CP_ME_RAM_DATAL, | ||
562 | be32_to_cpup(&fw_data[i + 1])); | ||
563 | } | ||
564 | } | ||
565 | } | ||
566 | |||
567 | /* Flush any pending commands to the CP. This should only be used just | ||
568 | * prior to a wait for idle, as it informs the engine that the command | ||
569 | * stream is ending. | ||
570 | */ | ||
571 | static void radeon_do_cp_flush(drm_radeon_private_t * dev_priv) | ||
572 | { | ||
573 | DRM_DEBUG("\n"); | ||
574 | #if 0 | ||
575 | u32 tmp; | ||
576 | |||
577 | tmp = RADEON_READ(RADEON_CP_RB_WPTR) | (1 << 31); | ||
578 | RADEON_WRITE(RADEON_CP_RB_WPTR, tmp); | ||
579 | #endif | ||
580 | } | ||
581 | |||
582 | /* Wait for the CP to go idle. | ||
583 | */ | ||
584 | int radeon_do_cp_idle(drm_radeon_private_t * dev_priv) | ||
585 | { | ||
586 | RING_LOCALS; | ||
587 | DRM_DEBUG("\n"); | ||
588 | |||
589 | BEGIN_RING(6); | ||
590 | |||
591 | RADEON_PURGE_CACHE(); | ||
592 | RADEON_PURGE_ZCACHE(); | ||
593 | RADEON_WAIT_UNTIL_IDLE(); | ||
594 | |||
595 | ADVANCE_RING(); | ||
596 | COMMIT_RING(); | ||
597 | |||
598 | return radeon_do_wait_for_idle(dev_priv); | ||
599 | } | ||
600 | |||
601 | /* Start the Command Processor. | ||
602 | */ | ||
603 | static void radeon_do_cp_start(drm_radeon_private_t * dev_priv) | ||
604 | { | ||
605 | RING_LOCALS; | ||
606 | DRM_DEBUG("\n"); | ||
607 | |||
608 | radeon_do_wait_for_idle(dev_priv); | ||
609 | |||
610 | RADEON_WRITE(RADEON_CP_CSQ_CNTL, dev_priv->cp_mode); | ||
611 | |||
612 | dev_priv->cp_running = 1; | ||
613 | |||
614 | /* on r420, any DMA from CP to system memory while 2D is active | ||
615 | * can cause a hang. workaround is to queue a CP RESYNC token | ||
616 | */ | ||
617 | if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_R420) { | ||
618 | BEGIN_RING(3); | ||
619 | OUT_RING(CP_PACKET0(R300_CP_RESYNC_ADDR, 1)); | ||
620 | OUT_RING(5); /* scratch reg 5 */ | ||
621 | OUT_RING(0xdeadbeef); | ||
622 | ADVANCE_RING(); | ||
623 | COMMIT_RING(); | ||
624 | } | ||
625 | |||
626 | BEGIN_RING(8); | ||
627 | /* isync can only be written through cp on r5xx write it here */ | ||
628 | OUT_RING(CP_PACKET0(RADEON_ISYNC_CNTL, 0)); | ||
629 | OUT_RING(RADEON_ISYNC_ANY2D_IDLE3D | | ||
630 | RADEON_ISYNC_ANY3D_IDLE2D | | ||
631 | RADEON_ISYNC_WAIT_IDLEGUI | | ||
632 | RADEON_ISYNC_CPSCRATCH_IDLEGUI); | ||
633 | RADEON_PURGE_CACHE(); | ||
634 | RADEON_PURGE_ZCACHE(); | ||
635 | RADEON_WAIT_UNTIL_IDLE(); | ||
636 | ADVANCE_RING(); | ||
637 | COMMIT_RING(); | ||
638 | |||
639 | dev_priv->track_flush |= RADEON_FLUSH_EMITED | RADEON_PURGE_EMITED; | ||
640 | } | ||
641 | |||
642 | /* Reset the Command Processor. This will not flush any pending | ||
643 | * commands, so you must wait for the CP command stream to complete | ||
644 | * before calling this routine. | ||
645 | */ | ||
646 | static void radeon_do_cp_reset(drm_radeon_private_t * dev_priv) | ||
647 | { | ||
648 | u32 cur_read_ptr; | ||
649 | DRM_DEBUG("\n"); | ||
650 | |||
651 | cur_read_ptr = RADEON_READ(RADEON_CP_RB_RPTR); | ||
652 | RADEON_WRITE(RADEON_CP_RB_WPTR, cur_read_ptr); | ||
653 | SET_RING_HEAD(dev_priv, cur_read_ptr); | ||
654 | dev_priv->ring.tail = cur_read_ptr; | ||
655 | } | ||
656 | |||
657 | /* Stop the Command Processor. This will not flush any pending | ||
658 | * commands, so you must flush the command stream and wait for the CP | ||
659 | * to go idle before calling this routine. | ||
660 | */ | ||
661 | static void radeon_do_cp_stop(drm_radeon_private_t * dev_priv) | ||
662 | { | ||
663 | RING_LOCALS; | ||
664 | DRM_DEBUG("\n"); | ||
665 | |||
666 | /* finish the pending CP_RESYNC token */ | ||
667 | if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_R420) { | ||
668 | BEGIN_RING(2); | ||
669 | OUT_RING(CP_PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0)); | ||
670 | OUT_RING(R300_RB3D_DC_FINISH); | ||
671 | ADVANCE_RING(); | ||
672 | COMMIT_RING(); | ||
673 | radeon_do_wait_for_idle(dev_priv); | ||
674 | } | ||
675 | |||
676 | RADEON_WRITE(RADEON_CP_CSQ_CNTL, RADEON_CSQ_PRIDIS_INDDIS); | ||
677 | |||
678 | dev_priv->cp_running = 0; | ||
679 | } | ||
680 | |||
681 | /* Reset the engine. This will stop the CP if it is running. | ||
682 | */ | ||
683 | static int radeon_do_engine_reset(struct drm_device * dev) | ||
684 | { | ||
685 | drm_radeon_private_t *dev_priv = dev->dev_private; | ||
686 | u32 clock_cntl_index = 0, mclk_cntl = 0, rbbm_soft_reset; | ||
687 | DRM_DEBUG("\n"); | ||
688 | |||
689 | radeon_do_pixcache_flush(dev_priv); | ||
690 | |||
691 | if ((dev_priv->flags & RADEON_FAMILY_MASK) <= CHIP_RV410) { | ||
692 | /* may need something similar for newer chips */ | ||
693 | clock_cntl_index = RADEON_READ(RADEON_CLOCK_CNTL_INDEX); | ||
694 | mclk_cntl = RADEON_READ_PLL(dev, RADEON_MCLK_CNTL); | ||
695 | |||
696 | RADEON_WRITE_PLL(RADEON_MCLK_CNTL, (mclk_cntl | | ||
697 | RADEON_FORCEON_MCLKA | | ||
698 | RADEON_FORCEON_MCLKB | | ||
699 | RADEON_FORCEON_YCLKA | | ||
700 | RADEON_FORCEON_YCLKB | | ||
701 | RADEON_FORCEON_MC | | ||
702 | RADEON_FORCEON_AIC)); | ||
703 | } | ||
704 | |||
705 | rbbm_soft_reset = RADEON_READ(RADEON_RBBM_SOFT_RESET); | ||
706 | |||
707 | RADEON_WRITE(RADEON_RBBM_SOFT_RESET, (rbbm_soft_reset | | ||
708 | RADEON_SOFT_RESET_CP | | ||
709 | RADEON_SOFT_RESET_HI | | ||
710 | RADEON_SOFT_RESET_SE | | ||
711 | RADEON_SOFT_RESET_RE | | ||
712 | RADEON_SOFT_RESET_PP | | ||
713 | RADEON_SOFT_RESET_E2 | | ||
714 | RADEON_SOFT_RESET_RB)); | ||
715 | RADEON_READ(RADEON_RBBM_SOFT_RESET); | ||
716 | RADEON_WRITE(RADEON_RBBM_SOFT_RESET, (rbbm_soft_reset & | ||
717 | ~(RADEON_SOFT_RESET_CP | | ||
718 | RADEON_SOFT_RESET_HI | | ||
719 | RADEON_SOFT_RESET_SE | | ||
720 | RADEON_SOFT_RESET_RE | | ||
721 | RADEON_SOFT_RESET_PP | | ||
722 | RADEON_SOFT_RESET_E2 | | ||
723 | RADEON_SOFT_RESET_RB))); | ||
724 | RADEON_READ(RADEON_RBBM_SOFT_RESET); | ||
725 | |||
726 | if ((dev_priv->flags & RADEON_FAMILY_MASK) <= CHIP_RV410) { | ||
727 | RADEON_WRITE_PLL(RADEON_MCLK_CNTL, mclk_cntl); | ||
728 | RADEON_WRITE(RADEON_CLOCK_CNTL_INDEX, clock_cntl_index); | ||
729 | RADEON_WRITE(RADEON_RBBM_SOFT_RESET, rbbm_soft_reset); | ||
730 | } | ||
731 | |||
732 | /* setup the raster pipes */ | ||
733 | if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R300) | ||
734 | radeon_init_pipes(dev); | ||
735 | |||
736 | /* Reset the CP ring */ | ||
737 | radeon_do_cp_reset(dev_priv); | ||
738 | |||
739 | /* The CP is no longer running after an engine reset */ | ||
740 | dev_priv->cp_running = 0; | ||
741 | |||
742 | /* Reset any pending vertex, indirect buffers */ | ||
743 | radeon_freelist_reset(dev); | ||
744 | |||
745 | return 0; | ||
746 | } | ||
747 | |||
748 | static void radeon_cp_init_ring_buffer(struct drm_device * dev, | ||
749 | drm_radeon_private_t *dev_priv, | ||
750 | struct drm_file *file_priv) | ||
751 | { | ||
752 | struct drm_radeon_master_private *master_priv; | ||
753 | u32 ring_start, cur_read_ptr; | ||
754 | |||
755 | /* Initialize the memory controller. With new memory map, the fb location | ||
756 | * is not changed, it should have been properly initialized already. Part | ||
757 | * of the problem is that the code below is bogus, assuming the GART is | ||
758 | * always appended to the fb which is not necessarily the case | ||
759 | */ | ||
760 | if (!dev_priv->new_memmap) | ||
761 | radeon_write_fb_location(dev_priv, | ||
762 | ((dev_priv->gart_vm_start - 1) & 0xffff0000) | ||
763 | | (dev_priv->fb_location >> 16)); | ||
764 | |||
765 | #if IS_ENABLED(CONFIG_AGP) | ||
766 | if (dev_priv->flags & RADEON_IS_AGP) { | ||
767 | radeon_write_agp_base(dev_priv, dev->agp->base); | ||
768 | |||
769 | radeon_write_agp_location(dev_priv, | ||
770 | (((dev_priv->gart_vm_start - 1 + | ||
771 | dev_priv->gart_size) & 0xffff0000) | | ||
772 | (dev_priv->gart_vm_start >> 16))); | ||
773 | |||
774 | ring_start = (dev_priv->cp_ring->offset | ||
775 | - dev->agp->base | ||
776 | + dev_priv->gart_vm_start); | ||
777 | } else | ||
778 | #endif | ||
779 | ring_start = (dev_priv->cp_ring->offset | ||
780 | - (unsigned long)dev->sg->virtual | ||
781 | + dev_priv->gart_vm_start); | ||
782 | |||
783 | RADEON_WRITE(RADEON_CP_RB_BASE, ring_start); | ||
784 | |||
785 | /* Set the write pointer delay */ | ||
786 | RADEON_WRITE(RADEON_CP_RB_WPTR_DELAY, 0); | ||
787 | |||
788 | /* Initialize the ring buffer's read and write pointers */ | ||
789 | cur_read_ptr = RADEON_READ(RADEON_CP_RB_RPTR); | ||
790 | RADEON_WRITE(RADEON_CP_RB_WPTR, cur_read_ptr); | ||
791 | SET_RING_HEAD(dev_priv, cur_read_ptr); | ||
792 | dev_priv->ring.tail = cur_read_ptr; | ||
793 | |||
794 | #if IS_ENABLED(CONFIG_AGP) | ||
795 | if (dev_priv->flags & RADEON_IS_AGP) { | ||
796 | RADEON_WRITE(RADEON_CP_RB_RPTR_ADDR, | ||
797 | dev_priv->ring_rptr->offset | ||
798 | - dev->agp->base + dev_priv->gart_vm_start); | ||
799 | } else | ||
800 | #endif | ||
801 | { | ||
802 | RADEON_WRITE(RADEON_CP_RB_RPTR_ADDR, | ||
803 | dev_priv->ring_rptr->offset | ||
804 | - ((unsigned long) dev->sg->virtual) | ||
805 | + dev_priv->gart_vm_start); | ||
806 | } | ||
807 | |||
808 | /* Set ring buffer size */ | ||
809 | #ifdef __BIG_ENDIAN | ||
810 | RADEON_WRITE(RADEON_CP_RB_CNTL, | ||
811 | RADEON_BUF_SWAP_32BIT | | ||
812 | (dev_priv->ring.fetch_size_l2ow << 18) | | ||
813 | (dev_priv->ring.rptr_update_l2qw << 8) | | ||
814 | dev_priv->ring.size_l2qw); | ||
815 | #else | ||
816 | RADEON_WRITE(RADEON_CP_RB_CNTL, | ||
817 | (dev_priv->ring.fetch_size_l2ow << 18) | | ||
818 | (dev_priv->ring.rptr_update_l2qw << 8) | | ||
819 | dev_priv->ring.size_l2qw); | ||
820 | #endif | ||
821 | |||
822 | |||
823 | /* Initialize the scratch register pointer. This will cause | ||
824 | * the scratch register values to be written out to memory | ||
825 | * whenever they are updated. | ||
826 | * | ||
827 | * We simply put this behind the ring read pointer, this works | ||
828 | * with PCI GART as well as (whatever kind of) AGP GART | ||
829 | */ | ||
830 | RADEON_WRITE(RADEON_SCRATCH_ADDR, RADEON_READ(RADEON_CP_RB_RPTR_ADDR) | ||
831 | + RADEON_SCRATCH_REG_OFFSET); | ||
832 | |||
833 | RADEON_WRITE(RADEON_SCRATCH_UMSK, 0x7); | ||
834 | |||
835 | radeon_enable_bm(dev_priv); | ||
836 | |||
837 | radeon_write_ring_rptr(dev_priv, RADEON_SCRATCHOFF(0), 0); | ||
838 | RADEON_WRITE(RADEON_LAST_FRAME_REG, 0); | ||
839 | |||
840 | radeon_write_ring_rptr(dev_priv, RADEON_SCRATCHOFF(1), 0); | ||
841 | RADEON_WRITE(RADEON_LAST_DISPATCH_REG, 0); | ||
842 | |||
843 | radeon_write_ring_rptr(dev_priv, RADEON_SCRATCHOFF(2), 0); | ||
844 | RADEON_WRITE(RADEON_LAST_CLEAR_REG, 0); | ||
845 | |||
846 | /* reset sarea copies of these */ | ||
847 | master_priv = file_priv->master->driver_priv; | ||
848 | if (master_priv->sarea_priv) { | ||
849 | master_priv->sarea_priv->last_frame = 0; | ||
850 | master_priv->sarea_priv->last_dispatch = 0; | ||
851 | master_priv->sarea_priv->last_clear = 0; | ||
852 | } | ||
853 | |||
854 | radeon_do_wait_for_idle(dev_priv); | ||
855 | |||
856 | /* Sync everything up */ | ||
857 | RADEON_WRITE(RADEON_ISYNC_CNTL, | ||
858 | (RADEON_ISYNC_ANY2D_IDLE3D | | ||
859 | RADEON_ISYNC_ANY3D_IDLE2D | | ||
860 | RADEON_ISYNC_WAIT_IDLEGUI | | ||
861 | RADEON_ISYNC_CPSCRATCH_IDLEGUI)); | ||
862 | |||
863 | } | ||
864 | |||
865 | static void radeon_test_writeback(drm_radeon_private_t * dev_priv) | ||
866 | { | ||
867 | u32 tmp; | ||
868 | |||
869 | /* Start with assuming that writeback doesn't work */ | ||
870 | dev_priv->writeback_works = 0; | ||
871 | |||
872 | /* Writeback doesn't seem to work everywhere, test it here and possibly | ||
873 | * enable it if it appears to work | ||
874 | */ | ||
875 | radeon_write_ring_rptr(dev_priv, RADEON_SCRATCHOFF(1), 0); | ||
876 | |||
877 | RADEON_WRITE(RADEON_SCRATCH_REG1, 0xdeadbeef); | ||
878 | |||
879 | for (tmp = 0; tmp < dev_priv->usec_timeout; tmp++) { | ||
880 | u32 val; | ||
881 | |||
882 | val = radeon_read_ring_rptr(dev_priv, RADEON_SCRATCHOFF(1)); | ||
883 | if (val == 0xdeadbeef) | ||
884 | break; | ||
885 | DRM_UDELAY(1); | ||
886 | } | ||
887 | |||
888 | if (tmp < dev_priv->usec_timeout) { | ||
889 | dev_priv->writeback_works = 1; | ||
890 | DRM_INFO("writeback test succeeded in %d usecs\n", tmp); | ||
891 | } else { | ||
892 | dev_priv->writeback_works = 0; | ||
893 | DRM_INFO("writeback test failed\n"); | ||
894 | } | ||
895 | if (radeon_no_wb == 1) { | ||
896 | dev_priv->writeback_works = 0; | ||
897 | DRM_INFO("writeback forced off\n"); | ||
898 | } | ||
899 | |||
900 | if (!dev_priv->writeback_works) { | ||
901 | /* Disable writeback to avoid unnecessary bus master transfer */ | ||
902 | RADEON_WRITE(RADEON_CP_RB_CNTL, RADEON_READ(RADEON_CP_RB_CNTL) | | ||
903 | RADEON_RB_NO_UPDATE); | ||
904 | RADEON_WRITE(RADEON_SCRATCH_UMSK, 0); | ||
905 | } | ||
906 | } | ||
907 | |||
908 | /* Enable or disable IGP GART on the chip */ | ||
909 | static void radeon_set_igpgart(drm_radeon_private_t * dev_priv, int on) | ||
910 | { | ||
911 | u32 temp; | ||
912 | |||
913 | if (on) { | ||
914 | DRM_DEBUG("programming igp gart %08X %08lX %08X\n", | ||
915 | dev_priv->gart_vm_start, | ||
916 | (long)dev_priv->gart_info.bus_addr, | ||
917 | dev_priv->gart_size); | ||
918 | |||
919 | temp = IGP_READ_MCIND(dev_priv, RS480_MC_MISC_CNTL); | ||
920 | if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS690) || | ||
921 | ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS740)) | ||
922 | IGP_WRITE_MCIND(RS480_MC_MISC_CNTL, (RS480_GART_INDEX_REG_EN | | ||
923 | RS690_BLOCK_GFX_D3_EN)); | ||
924 | else | ||
925 | IGP_WRITE_MCIND(RS480_MC_MISC_CNTL, RS480_GART_INDEX_REG_EN); | ||
926 | |||
927 | IGP_WRITE_MCIND(RS480_AGP_ADDRESS_SPACE_SIZE, (RS480_GART_EN | | ||
928 | RS480_VA_SIZE_32MB)); | ||
929 | |||
930 | temp = IGP_READ_MCIND(dev_priv, RS480_GART_FEATURE_ID); | ||
931 | IGP_WRITE_MCIND(RS480_GART_FEATURE_ID, (RS480_HANG_EN | | ||
932 | RS480_TLB_ENABLE | | ||
933 | RS480_GTW_LAC_EN | | ||
934 | RS480_1LEVEL_GART)); | ||
935 | |||
936 | temp = dev_priv->gart_info.bus_addr & 0xfffff000; | ||
937 | temp |= (upper_32_bits(dev_priv->gart_info.bus_addr) & 0xff) << 4; | ||
938 | IGP_WRITE_MCIND(RS480_GART_BASE, temp); | ||
939 | |||
940 | temp = IGP_READ_MCIND(dev_priv, RS480_AGP_MODE_CNTL); | ||
941 | IGP_WRITE_MCIND(RS480_AGP_MODE_CNTL, ((1 << RS480_REQ_TYPE_SNOOP_SHIFT) | | ||
942 | RS480_REQ_TYPE_SNOOP_DIS)); | ||
943 | |||
944 | radeon_write_agp_base(dev_priv, dev_priv->gart_vm_start); | ||
945 | |||
946 | dev_priv->gart_size = 32*1024*1024; | ||
947 | temp = (((dev_priv->gart_vm_start - 1 + dev_priv->gart_size) & | ||
948 | 0xffff0000) | (dev_priv->gart_vm_start >> 16)); | ||
949 | |||
950 | radeon_write_agp_location(dev_priv, temp); | ||
951 | |||
952 | temp = IGP_READ_MCIND(dev_priv, RS480_AGP_ADDRESS_SPACE_SIZE); | ||
953 | IGP_WRITE_MCIND(RS480_AGP_ADDRESS_SPACE_SIZE, (RS480_GART_EN | | ||
954 | RS480_VA_SIZE_32MB)); | ||
955 | |||
956 | do { | ||
957 | temp = IGP_READ_MCIND(dev_priv, RS480_GART_CACHE_CNTRL); | ||
958 | if ((temp & RS480_GART_CACHE_INVALIDATE) == 0) | ||
959 | break; | ||
960 | DRM_UDELAY(1); | ||
961 | } while (1); | ||
962 | |||
963 | IGP_WRITE_MCIND(RS480_GART_CACHE_CNTRL, | ||
964 | RS480_GART_CACHE_INVALIDATE); | ||
965 | |||
966 | do { | ||
967 | temp = IGP_READ_MCIND(dev_priv, RS480_GART_CACHE_CNTRL); | ||
968 | if ((temp & RS480_GART_CACHE_INVALIDATE) == 0) | ||
969 | break; | ||
970 | DRM_UDELAY(1); | ||
971 | } while (1); | ||
972 | |||
973 | IGP_WRITE_MCIND(RS480_GART_CACHE_CNTRL, 0); | ||
974 | } else { | ||
975 | IGP_WRITE_MCIND(RS480_AGP_ADDRESS_SPACE_SIZE, 0); | ||
976 | } | ||
977 | } | ||
978 | |||
979 | /* Enable or disable IGP GART on the chip */ | ||
980 | static void rs600_set_igpgart(drm_radeon_private_t *dev_priv, int on) | ||
981 | { | ||
982 | u32 temp; | ||
983 | int i; | ||
984 | |||
985 | if (on) { | ||
986 | DRM_DEBUG("programming igp gart %08X %08lX %08X\n", | ||
987 | dev_priv->gart_vm_start, | ||
988 | (long)dev_priv->gart_info.bus_addr, | ||
989 | dev_priv->gart_size); | ||
990 | |||
991 | IGP_WRITE_MCIND(RS600_MC_PT0_CNTL, (RS600_EFFECTIVE_L2_CACHE_SIZE(6) | | ||
992 | RS600_EFFECTIVE_L2_QUEUE_SIZE(6))); | ||
993 | |||
994 | for (i = 0; i < 19; i++) | ||
995 | IGP_WRITE_MCIND(RS600_MC_PT0_CLIENT0_CNTL + i, | ||
996 | (RS600_ENABLE_TRANSLATION_MODE_OVERRIDE | | ||
997 | RS600_SYSTEM_ACCESS_MODE_IN_SYS | | ||
998 | RS600_SYSTEM_APERTURE_UNMAPPED_ACCESS_PASSTHROUGH | | ||
999 | RS600_EFFECTIVE_L1_CACHE_SIZE(3) | | ||
1000 | RS600_ENABLE_FRAGMENT_PROCESSING | | ||
1001 | RS600_EFFECTIVE_L1_QUEUE_SIZE(3))); | ||
1002 | |||
1003 | IGP_WRITE_MCIND(RS600_MC_PT0_CONTEXT0_CNTL, (RS600_ENABLE_PAGE_TABLE | | ||
1004 | RS600_PAGE_TABLE_TYPE_FLAT)); | ||
1005 | |||
1006 | /* disable all other contexts */ | ||
1007 | for (i = 1; i < 8; i++) | ||
1008 | IGP_WRITE_MCIND(RS600_MC_PT0_CONTEXT0_CNTL + i, 0); | ||
1009 | |||
1010 | /* setup the page table aperture */ | ||
1011 | IGP_WRITE_MCIND(RS600_MC_PT0_CONTEXT0_FLAT_BASE_ADDR, | ||
1012 | dev_priv->gart_info.bus_addr); | ||
1013 | IGP_WRITE_MCIND(RS600_MC_PT0_CONTEXT0_FLAT_START_ADDR, | ||
1014 | dev_priv->gart_vm_start); | ||
1015 | IGP_WRITE_MCIND(RS600_MC_PT0_CONTEXT0_FLAT_END_ADDR, | ||
1016 | (dev_priv->gart_vm_start + dev_priv->gart_size - 1)); | ||
1017 | IGP_WRITE_MCIND(RS600_MC_PT0_CONTEXT0_DEFAULT_READ_ADDR, 0); | ||
1018 | |||
1019 | /* setup the system aperture */ | ||
1020 | IGP_WRITE_MCIND(RS600_MC_PT0_SYSTEM_APERTURE_LOW_ADDR, | ||
1021 | dev_priv->gart_vm_start); | ||
1022 | IGP_WRITE_MCIND(RS600_MC_PT0_SYSTEM_APERTURE_HIGH_ADDR, | ||
1023 | (dev_priv->gart_vm_start + dev_priv->gart_size - 1)); | ||
1024 | |||
1025 | /* enable page tables */ | ||
1026 | temp = IGP_READ_MCIND(dev_priv, RS600_MC_PT0_CNTL); | ||
1027 | IGP_WRITE_MCIND(RS600_MC_PT0_CNTL, (temp | RS600_ENABLE_PT)); | ||
1028 | |||
1029 | temp = IGP_READ_MCIND(dev_priv, RS600_MC_CNTL1); | ||
1030 | IGP_WRITE_MCIND(RS600_MC_CNTL1, (temp | RS600_ENABLE_PAGE_TABLES)); | ||
1031 | |||
1032 | /* invalidate the cache */ | ||
1033 | temp = IGP_READ_MCIND(dev_priv, RS600_MC_PT0_CNTL); | ||
1034 | |||
1035 | temp &= ~(RS600_INVALIDATE_ALL_L1_TLBS | RS600_INVALIDATE_L2_CACHE); | ||
1036 | IGP_WRITE_MCIND(RS600_MC_PT0_CNTL, temp); | ||
1037 | temp = IGP_READ_MCIND(dev_priv, RS600_MC_PT0_CNTL); | ||
1038 | |||
1039 | temp |= RS600_INVALIDATE_ALL_L1_TLBS | RS600_INVALIDATE_L2_CACHE; | ||
1040 | IGP_WRITE_MCIND(RS600_MC_PT0_CNTL, temp); | ||
1041 | temp = IGP_READ_MCIND(dev_priv, RS600_MC_PT0_CNTL); | ||
1042 | |||
1043 | temp &= ~(RS600_INVALIDATE_ALL_L1_TLBS | RS600_INVALIDATE_L2_CACHE); | ||
1044 | IGP_WRITE_MCIND(RS600_MC_PT0_CNTL, temp); | ||
1045 | temp = IGP_READ_MCIND(dev_priv, RS600_MC_PT0_CNTL); | ||
1046 | |||
1047 | } else { | ||
1048 | IGP_WRITE_MCIND(RS600_MC_PT0_CNTL, 0); | ||
1049 | temp = IGP_READ_MCIND(dev_priv, RS600_MC_CNTL1); | ||
1050 | temp &= ~RS600_ENABLE_PAGE_TABLES; | ||
1051 | IGP_WRITE_MCIND(RS600_MC_CNTL1, temp); | ||
1052 | } | ||
1053 | } | ||
1054 | |||
1055 | static void radeon_set_pciegart(drm_radeon_private_t * dev_priv, int on) | ||
1056 | { | ||
1057 | u32 tmp = RADEON_READ_PCIE(dev_priv, RADEON_PCIE_TX_GART_CNTL); | ||
1058 | if (on) { | ||
1059 | |||
1060 | DRM_DEBUG("programming pcie %08X %08lX %08X\n", | ||
1061 | dev_priv->gart_vm_start, | ||
1062 | (long)dev_priv->gart_info.bus_addr, | ||
1063 | dev_priv->gart_size); | ||
1064 | RADEON_WRITE_PCIE(RADEON_PCIE_TX_DISCARD_RD_ADDR_LO, | ||
1065 | dev_priv->gart_vm_start); | ||
1066 | RADEON_WRITE_PCIE(RADEON_PCIE_TX_GART_BASE, | ||
1067 | dev_priv->gart_info.bus_addr); | ||
1068 | RADEON_WRITE_PCIE(RADEON_PCIE_TX_GART_START_LO, | ||
1069 | dev_priv->gart_vm_start); | ||
1070 | RADEON_WRITE_PCIE(RADEON_PCIE_TX_GART_END_LO, | ||
1071 | dev_priv->gart_vm_start + | ||
1072 | dev_priv->gart_size - 1); | ||
1073 | |||
1074 | radeon_write_agp_location(dev_priv, 0xffffffc0); /* ?? */ | ||
1075 | |||
1076 | RADEON_WRITE_PCIE(RADEON_PCIE_TX_GART_CNTL, | ||
1077 | RADEON_PCIE_TX_GART_EN); | ||
1078 | } else { | ||
1079 | RADEON_WRITE_PCIE(RADEON_PCIE_TX_GART_CNTL, | ||
1080 | tmp & ~RADEON_PCIE_TX_GART_EN); | ||
1081 | } | ||
1082 | } | ||
1083 | |||
1084 | /* Enable or disable PCI GART on the chip */ | ||
1085 | static void radeon_set_pcigart(drm_radeon_private_t * dev_priv, int on) | ||
1086 | { | ||
1087 | u32 tmp; | ||
1088 | |||
1089 | if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS690) || | ||
1090 | ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS740) || | ||
1091 | (dev_priv->flags & RADEON_IS_IGPGART)) { | ||
1092 | radeon_set_igpgart(dev_priv, on); | ||
1093 | return; | ||
1094 | } | ||
1095 | |||
1096 | if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS600) { | ||
1097 | rs600_set_igpgart(dev_priv, on); | ||
1098 | return; | ||
1099 | } | ||
1100 | |||
1101 | if (dev_priv->flags & RADEON_IS_PCIE) { | ||
1102 | radeon_set_pciegart(dev_priv, on); | ||
1103 | return; | ||
1104 | } | ||
1105 | |||
1106 | tmp = RADEON_READ(RADEON_AIC_CNTL); | ||
1107 | |||
1108 | if (on) { | ||
1109 | RADEON_WRITE(RADEON_AIC_CNTL, | ||
1110 | tmp | RADEON_PCIGART_TRANSLATE_EN); | ||
1111 | |||
1112 | /* set PCI GART page-table base address | ||
1113 | */ | ||
1114 | RADEON_WRITE(RADEON_AIC_PT_BASE, dev_priv->gart_info.bus_addr); | ||
1115 | |||
1116 | /* set address range for PCI address translate | ||
1117 | */ | ||
1118 | RADEON_WRITE(RADEON_AIC_LO_ADDR, dev_priv->gart_vm_start); | ||
1119 | RADEON_WRITE(RADEON_AIC_HI_ADDR, dev_priv->gart_vm_start | ||
1120 | + dev_priv->gart_size - 1); | ||
1121 | |||
1122 | /* Turn off AGP aperture -- is this required for PCI GART? | ||
1123 | */ | ||
1124 | radeon_write_agp_location(dev_priv, 0xffffffc0); | ||
1125 | RADEON_WRITE(RADEON_AGP_COMMAND, 0); /* clear AGP_COMMAND */ | ||
1126 | } else { | ||
1127 | RADEON_WRITE(RADEON_AIC_CNTL, | ||
1128 | tmp & ~RADEON_PCIGART_TRANSLATE_EN); | ||
1129 | } | ||
1130 | } | ||
1131 | |||
1132 | static int radeon_setup_pcigart_surface(drm_radeon_private_t *dev_priv) | ||
1133 | { | ||
1134 | struct drm_ati_pcigart_info *gart_info = &dev_priv->gart_info; | ||
1135 | struct radeon_virt_surface *vp; | ||
1136 | int i; | ||
1137 | |||
1138 | for (i = 0; i < RADEON_MAX_SURFACES * 2; i++) { | ||
1139 | if (!dev_priv->virt_surfaces[i].file_priv || | ||
1140 | dev_priv->virt_surfaces[i].file_priv == PCIGART_FILE_PRIV) | ||
1141 | break; | ||
1142 | } | ||
1143 | if (i >= 2 * RADEON_MAX_SURFACES) | ||
1144 | return -ENOMEM; | ||
1145 | vp = &dev_priv->virt_surfaces[i]; | ||
1146 | |||
1147 | for (i = 0; i < RADEON_MAX_SURFACES; i++) { | ||
1148 | struct radeon_surface *sp = &dev_priv->surfaces[i]; | ||
1149 | if (sp->refcount) | ||
1150 | continue; | ||
1151 | |||
1152 | vp->surface_index = i; | ||
1153 | vp->lower = gart_info->bus_addr; | ||
1154 | vp->upper = vp->lower + gart_info->table_size; | ||
1155 | vp->flags = 0; | ||
1156 | vp->file_priv = PCIGART_FILE_PRIV; | ||
1157 | |||
1158 | sp->refcount = 1; | ||
1159 | sp->lower = vp->lower; | ||
1160 | sp->upper = vp->upper; | ||
1161 | sp->flags = 0; | ||
1162 | |||
1163 | RADEON_WRITE(RADEON_SURFACE0_INFO + 16 * i, sp->flags); | ||
1164 | RADEON_WRITE(RADEON_SURFACE0_LOWER_BOUND + 16 * i, sp->lower); | ||
1165 | RADEON_WRITE(RADEON_SURFACE0_UPPER_BOUND + 16 * i, sp->upper); | ||
1166 | return 0; | ||
1167 | } | ||
1168 | |||
1169 | return -ENOMEM; | ||
1170 | } | ||
1171 | |||
1172 | static int radeon_do_init_cp(struct drm_device *dev, drm_radeon_init_t *init, | ||
1173 | struct drm_file *file_priv) | ||
1174 | { | ||
1175 | drm_radeon_private_t *dev_priv = dev->dev_private; | ||
1176 | struct drm_radeon_master_private *master_priv = file_priv->master->driver_priv; | ||
1177 | |||
1178 | DRM_DEBUG("\n"); | ||
1179 | |||
1180 | /* if we require new memory map but we don't have it fail */ | ||
1181 | if ((dev_priv->flags & RADEON_NEW_MEMMAP) && !dev_priv->new_memmap) { | ||
1182 | DRM_ERROR("Cannot initialise DRM on this card\nThis card requires a new X.org DDX for 3D\n"); | ||
1183 | radeon_do_cleanup_cp(dev); | ||
1184 | return -EINVAL; | ||
1185 | } | ||
1186 | |||
1187 | if (init->is_pci && (dev_priv->flags & RADEON_IS_AGP)) { | ||
1188 | DRM_DEBUG("Forcing AGP card to PCI mode\n"); | ||
1189 | dev_priv->flags &= ~RADEON_IS_AGP; | ||
1190 | } else if (!(dev_priv->flags & (RADEON_IS_AGP | RADEON_IS_PCI | RADEON_IS_PCIE)) | ||
1191 | && !init->is_pci) { | ||
1192 | DRM_DEBUG("Restoring AGP flag\n"); | ||
1193 | dev_priv->flags |= RADEON_IS_AGP; | ||
1194 | } | ||
1195 | |||
1196 | if ((!(dev_priv->flags & RADEON_IS_AGP)) && !dev->sg) { | ||
1197 | DRM_ERROR("PCI GART memory not allocated!\n"); | ||
1198 | radeon_do_cleanup_cp(dev); | ||
1199 | return -EINVAL; | ||
1200 | } | ||
1201 | |||
1202 | dev_priv->usec_timeout = init->usec_timeout; | ||
1203 | if (dev_priv->usec_timeout < 1 || | ||
1204 | dev_priv->usec_timeout > RADEON_MAX_USEC_TIMEOUT) { | ||
1205 | DRM_DEBUG("TIMEOUT problem!\n"); | ||
1206 | radeon_do_cleanup_cp(dev); | ||
1207 | return -EINVAL; | ||
1208 | } | ||
1209 | |||
1210 | /* Enable vblank on CRTC1 for older X servers | ||
1211 | */ | ||
1212 | dev_priv->vblank_crtc = DRM_RADEON_VBLANK_CRTC1; | ||
1213 | |||
1214 | switch(init->func) { | ||
1215 | case RADEON_INIT_R200_CP: | ||
1216 | dev_priv->microcode_version = UCODE_R200; | ||
1217 | break; | ||
1218 | case RADEON_INIT_R300_CP: | ||
1219 | dev_priv->microcode_version = UCODE_R300; | ||
1220 | break; | ||
1221 | default: | ||
1222 | dev_priv->microcode_version = UCODE_R100; | ||
1223 | } | ||
1224 | |||
1225 | dev_priv->do_boxes = 0; | ||
1226 | dev_priv->cp_mode = init->cp_mode; | ||
1227 | |||
1228 | /* We don't support anything other than bus-mastering ring mode, | ||
1229 | * but the ring can be in either AGP or PCI space for the ring | ||
1230 | * read pointer. | ||
1231 | */ | ||
1232 | if ((init->cp_mode != RADEON_CSQ_PRIBM_INDDIS) && | ||
1233 | (init->cp_mode != RADEON_CSQ_PRIBM_INDBM)) { | ||
1234 | DRM_DEBUG("BAD cp_mode (%x)!\n", init->cp_mode); | ||
1235 | radeon_do_cleanup_cp(dev); | ||
1236 | return -EINVAL; | ||
1237 | } | ||
1238 | |||
1239 | switch (init->fb_bpp) { | ||
1240 | case 16: | ||
1241 | dev_priv->color_fmt = RADEON_COLOR_FORMAT_RGB565; | ||
1242 | break; | ||
1243 | case 32: | ||
1244 | default: | ||
1245 | dev_priv->color_fmt = RADEON_COLOR_FORMAT_ARGB8888; | ||
1246 | break; | ||
1247 | } | ||
1248 | dev_priv->front_offset = init->front_offset; | ||
1249 | dev_priv->front_pitch = init->front_pitch; | ||
1250 | dev_priv->back_offset = init->back_offset; | ||
1251 | dev_priv->back_pitch = init->back_pitch; | ||
1252 | |||
1253 | switch (init->depth_bpp) { | ||
1254 | case 16: | ||
1255 | dev_priv->depth_fmt = RADEON_DEPTH_FORMAT_16BIT_INT_Z; | ||
1256 | break; | ||
1257 | case 32: | ||
1258 | default: | ||
1259 | dev_priv->depth_fmt = RADEON_DEPTH_FORMAT_24BIT_INT_Z; | ||
1260 | break; | ||
1261 | } | ||
1262 | dev_priv->depth_offset = init->depth_offset; | ||
1263 | dev_priv->depth_pitch = init->depth_pitch; | ||
1264 | |||
1265 | /* Hardware state for depth clears. Remove this if/when we no | ||
1266 | * longer clear the depth buffer with a 3D rectangle. Hard-code | ||
1267 | * all values to prevent unwanted 3D state from slipping through | ||
1268 | * and screwing with the clear operation. | ||
1269 | */ | ||
1270 | dev_priv->depth_clear.rb3d_cntl = (RADEON_PLANE_MASK_ENABLE | | ||
1271 | (dev_priv->color_fmt << 10) | | ||
1272 | (dev_priv->microcode_version == | ||
1273 | UCODE_R100 ? RADEON_ZBLOCK16 : 0)); | ||
1274 | |||
1275 | dev_priv->depth_clear.rb3d_zstencilcntl = | ||
1276 | (dev_priv->depth_fmt | | ||
1277 | RADEON_Z_TEST_ALWAYS | | ||
1278 | RADEON_STENCIL_TEST_ALWAYS | | ||
1279 | RADEON_STENCIL_S_FAIL_REPLACE | | ||
1280 | RADEON_STENCIL_ZPASS_REPLACE | | ||
1281 | RADEON_STENCIL_ZFAIL_REPLACE | RADEON_Z_WRITE_ENABLE); | ||
1282 | |||
1283 | dev_priv->depth_clear.se_cntl = (RADEON_FFACE_CULL_CW | | ||
1284 | RADEON_BFACE_SOLID | | ||
1285 | RADEON_FFACE_SOLID | | ||
1286 | RADEON_FLAT_SHADE_VTX_LAST | | ||
1287 | RADEON_DIFFUSE_SHADE_FLAT | | ||
1288 | RADEON_ALPHA_SHADE_FLAT | | ||
1289 | RADEON_SPECULAR_SHADE_FLAT | | ||
1290 | RADEON_FOG_SHADE_FLAT | | ||
1291 | RADEON_VTX_PIX_CENTER_OGL | | ||
1292 | RADEON_ROUND_MODE_TRUNC | | ||
1293 | RADEON_ROUND_PREC_8TH_PIX); | ||
1294 | |||
1295 | |||
1296 | dev_priv->ring_offset = init->ring_offset; | ||
1297 | dev_priv->ring_rptr_offset = init->ring_rptr_offset; | ||
1298 | dev_priv->buffers_offset = init->buffers_offset; | ||
1299 | dev_priv->gart_textures_offset = init->gart_textures_offset; | ||
1300 | |||
1301 | master_priv->sarea = drm_legacy_getsarea(dev); | ||
1302 | if (!master_priv->sarea) { | ||
1303 | DRM_ERROR("could not find sarea!\n"); | ||
1304 | radeon_do_cleanup_cp(dev); | ||
1305 | return -EINVAL; | ||
1306 | } | ||
1307 | |||
1308 | dev_priv->cp_ring = drm_legacy_findmap(dev, init->ring_offset); | ||
1309 | if (!dev_priv->cp_ring) { | ||
1310 | DRM_ERROR("could not find cp ring region!\n"); | ||
1311 | radeon_do_cleanup_cp(dev); | ||
1312 | return -EINVAL; | ||
1313 | } | ||
1314 | dev_priv->ring_rptr = drm_legacy_findmap(dev, init->ring_rptr_offset); | ||
1315 | if (!dev_priv->ring_rptr) { | ||
1316 | DRM_ERROR("could not find ring read pointer!\n"); | ||
1317 | radeon_do_cleanup_cp(dev); | ||
1318 | return -EINVAL; | ||
1319 | } | ||
1320 | dev->agp_buffer_token = init->buffers_offset; | ||
1321 | dev->agp_buffer_map = drm_legacy_findmap(dev, init->buffers_offset); | ||
1322 | if (!dev->agp_buffer_map) { | ||
1323 | DRM_ERROR("could not find dma buffer region!\n"); | ||
1324 | radeon_do_cleanup_cp(dev); | ||
1325 | return -EINVAL; | ||
1326 | } | ||
1327 | |||
1328 | if (init->gart_textures_offset) { | ||
1329 | dev_priv->gart_textures = | ||
1330 | drm_legacy_findmap(dev, init->gart_textures_offset); | ||
1331 | if (!dev_priv->gart_textures) { | ||
1332 | DRM_ERROR("could not find GART texture region!\n"); | ||
1333 | radeon_do_cleanup_cp(dev); | ||
1334 | return -EINVAL; | ||
1335 | } | ||
1336 | } | ||
1337 | |||
1338 | #if IS_ENABLED(CONFIG_AGP) | ||
1339 | if (dev_priv->flags & RADEON_IS_AGP) { | ||
1340 | drm_legacy_ioremap_wc(dev_priv->cp_ring, dev); | ||
1341 | drm_legacy_ioremap_wc(dev_priv->ring_rptr, dev); | ||
1342 | drm_legacy_ioremap_wc(dev->agp_buffer_map, dev); | ||
1343 | if (!dev_priv->cp_ring->handle || | ||
1344 | !dev_priv->ring_rptr->handle || | ||
1345 | !dev->agp_buffer_map->handle) { | ||
1346 | DRM_ERROR("could not find ioremap agp regions!\n"); | ||
1347 | radeon_do_cleanup_cp(dev); | ||
1348 | return -EINVAL; | ||
1349 | } | ||
1350 | } else | ||
1351 | #endif | ||
1352 | { | ||
1353 | dev_priv->cp_ring->handle = | ||
1354 | (void *)(unsigned long)dev_priv->cp_ring->offset; | ||
1355 | dev_priv->ring_rptr->handle = | ||
1356 | (void *)(unsigned long)dev_priv->ring_rptr->offset; | ||
1357 | dev->agp_buffer_map->handle = | ||
1358 | (void *)(unsigned long)dev->agp_buffer_map->offset; | ||
1359 | |||
1360 | DRM_DEBUG("dev_priv->cp_ring->handle %p\n", | ||
1361 | dev_priv->cp_ring->handle); | ||
1362 | DRM_DEBUG("dev_priv->ring_rptr->handle %p\n", | ||
1363 | dev_priv->ring_rptr->handle); | ||
1364 | DRM_DEBUG("dev->agp_buffer_map->handle %p\n", | ||
1365 | dev->agp_buffer_map->handle); | ||
1366 | } | ||
1367 | |||
1368 | dev_priv->fb_location = (radeon_read_fb_location(dev_priv) & 0xffff) << 16; | ||
1369 | dev_priv->fb_size = | ||
1370 | ((radeon_read_fb_location(dev_priv) & 0xffff0000u) + 0x10000) | ||
1371 | - dev_priv->fb_location; | ||
1372 | |||
1373 | dev_priv->front_pitch_offset = (((dev_priv->front_pitch / 64) << 22) | | ||
1374 | ((dev_priv->front_offset | ||
1375 | + dev_priv->fb_location) >> 10)); | ||
1376 | |||
1377 | dev_priv->back_pitch_offset = (((dev_priv->back_pitch / 64) << 22) | | ||
1378 | ((dev_priv->back_offset | ||
1379 | + dev_priv->fb_location) >> 10)); | ||
1380 | |||
1381 | dev_priv->depth_pitch_offset = (((dev_priv->depth_pitch / 64) << 22) | | ||
1382 | ((dev_priv->depth_offset | ||
1383 | + dev_priv->fb_location) >> 10)); | ||
1384 | |||
1385 | dev_priv->gart_size = init->gart_size; | ||
1386 | |||
1387 | /* New let's set the memory map ... */ | ||
1388 | if (dev_priv->new_memmap) { | ||
1389 | u32 base = 0; | ||
1390 | |||
1391 | DRM_INFO("Setting GART location based on new memory map\n"); | ||
1392 | |||
1393 | /* If using AGP, try to locate the AGP aperture at the same | ||
1394 | * location in the card and on the bus, though we have to | ||
1395 | * align it down. | ||
1396 | */ | ||
1397 | #if IS_ENABLED(CONFIG_AGP) | ||
1398 | if (dev_priv->flags & RADEON_IS_AGP) { | ||
1399 | base = dev->agp->base; | ||
1400 | /* Check if valid */ | ||
1401 | if ((base + dev_priv->gart_size - 1) >= dev_priv->fb_location && | ||
1402 | base < (dev_priv->fb_location + dev_priv->fb_size - 1)) { | ||
1403 | DRM_INFO("Can't use AGP base @0x%08lx, won't fit\n", | ||
1404 | dev->agp->base); | ||
1405 | base = 0; | ||
1406 | } | ||
1407 | } | ||
1408 | #endif | ||
1409 | /* If not or if AGP is at 0 (Macs), try to put it elsewhere */ | ||
1410 | if (base == 0) { | ||
1411 | base = dev_priv->fb_location + dev_priv->fb_size; | ||
1412 | if (base < dev_priv->fb_location || | ||
1413 | ((base + dev_priv->gart_size) & 0xfffffffful) < base) | ||
1414 | base = dev_priv->fb_location | ||
1415 | - dev_priv->gart_size; | ||
1416 | } | ||
1417 | dev_priv->gart_vm_start = base & 0xffc00000u; | ||
1418 | if (dev_priv->gart_vm_start != base) | ||
1419 | DRM_INFO("GART aligned down from 0x%08x to 0x%08x\n", | ||
1420 | base, dev_priv->gart_vm_start); | ||
1421 | } else { | ||
1422 | DRM_INFO("Setting GART location based on old memory map\n"); | ||
1423 | dev_priv->gart_vm_start = dev_priv->fb_location + | ||
1424 | RADEON_READ(RADEON_CONFIG_APER_SIZE); | ||
1425 | } | ||
1426 | |||
1427 | #if IS_ENABLED(CONFIG_AGP) | ||
1428 | if (dev_priv->flags & RADEON_IS_AGP) | ||
1429 | dev_priv->gart_buffers_offset = (dev->agp_buffer_map->offset | ||
1430 | - dev->agp->base | ||
1431 | + dev_priv->gart_vm_start); | ||
1432 | else | ||
1433 | #endif | ||
1434 | dev_priv->gart_buffers_offset = (dev->agp_buffer_map->offset | ||
1435 | - (unsigned long)dev->sg->virtual | ||
1436 | + dev_priv->gart_vm_start); | ||
1437 | |||
1438 | DRM_DEBUG("dev_priv->gart_size %d\n", dev_priv->gart_size); | ||
1439 | DRM_DEBUG("dev_priv->gart_vm_start 0x%x\n", dev_priv->gart_vm_start); | ||
1440 | DRM_DEBUG("dev_priv->gart_buffers_offset 0x%lx\n", | ||
1441 | dev_priv->gart_buffers_offset); | ||
1442 | |||
1443 | dev_priv->ring.start = (u32 *) dev_priv->cp_ring->handle; | ||
1444 | dev_priv->ring.end = ((u32 *) dev_priv->cp_ring->handle | ||
1445 | + init->ring_size / sizeof(u32)); | ||
1446 | dev_priv->ring.size = init->ring_size; | ||
1447 | dev_priv->ring.size_l2qw = order_base_2(init->ring_size / 8); | ||
1448 | |||
1449 | dev_priv->ring.rptr_update = /* init->rptr_update */ 4096; | ||
1450 | dev_priv->ring.rptr_update_l2qw = order_base_2( /* init->rptr_update */ 4096 / 8); | ||
1451 | |||
1452 | dev_priv->ring.fetch_size = /* init->fetch_size */ 32; | ||
1453 | dev_priv->ring.fetch_size_l2ow = order_base_2( /* init->fetch_size */ 32 / 16); | ||
1454 | dev_priv->ring.tail_mask = (dev_priv->ring.size / sizeof(u32)) - 1; | ||
1455 | |||
1456 | dev_priv->ring.high_mark = RADEON_RING_HIGH_MARK; | ||
1457 | |||
1458 | #if IS_ENABLED(CONFIG_AGP) | ||
1459 | if (dev_priv->flags & RADEON_IS_AGP) { | ||
1460 | /* Turn off PCI GART */ | ||
1461 | radeon_set_pcigart(dev_priv, 0); | ||
1462 | } else | ||
1463 | #endif | ||
1464 | { | ||
1465 | u32 sctrl; | ||
1466 | int ret; | ||
1467 | |||
1468 | dev_priv->gart_info.table_mask = DMA_BIT_MASK(32); | ||
1469 | /* if we have an offset set from userspace */ | ||
1470 | if (dev_priv->pcigart_offset_set) { | ||
1471 | dev_priv->gart_info.bus_addr = | ||
1472 | (resource_size_t)dev_priv->pcigart_offset + dev_priv->fb_location; | ||
1473 | dev_priv->gart_info.mapping.offset = | ||
1474 | dev_priv->pcigart_offset + dev_priv->fb_aper_offset; | ||
1475 | dev_priv->gart_info.mapping.size = | ||
1476 | dev_priv->gart_info.table_size; | ||
1477 | |||
1478 | drm_legacy_ioremap_wc(&dev_priv->gart_info.mapping, dev); | ||
1479 | dev_priv->gart_info.addr = | ||
1480 | dev_priv->gart_info.mapping.handle; | ||
1481 | |||
1482 | if (dev_priv->flags & RADEON_IS_PCIE) | ||
1483 | dev_priv->gart_info.gart_reg_if = DRM_ATI_GART_PCIE; | ||
1484 | else | ||
1485 | dev_priv->gart_info.gart_reg_if = DRM_ATI_GART_PCI; | ||
1486 | dev_priv->gart_info.gart_table_location = | ||
1487 | DRM_ATI_GART_FB; | ||
1488 | |||
1489 | DRM_DEBUG("Setting phys_pci_gart to %p %08lX\n", | ||
1490 | dev_priv->gart_info.addr, | ||
1491 | dev_priv->pcigart_offset); | ||
1492 | } else { | ||
1493 | if (dev_priv->flags & RADEON_IS_IGPGART) | ||
1494 | dev_priv->gart_info.gart_reg_if = DRM_ATI_GART_IGP; | ||
1495 | else | ||
1496 | dev_priv->gart_info.gart_reg_if = DRM_ATI_GART_PCI; | ||
1497 | dev_priv->gart_info.gart_table_location = | ||
1498 | DRM_ATI_GART_MAIN; | ||
1499 | dev_priv->gart_info.addr = NULL; | ||
1500 | dev_priv->gart_info.bus_addr = 0; | ||
1501 | if (dev_priv->flags & RADEON_IS_PCIE) { | ||
1502 | DRM_ERROR | ||
1503 | ("Cannot use PCI Express without GART in FB memory\n"); | ||
1504 | radeon_do_cleanup_cp(dev); | ||
1505 | return -EINVAL; | ||
1506 | } | ||
1507 | } | ||
1508 | |||
1509 | sctrl = RADEON_READ(RADEON_SURFACE_CNTL); | ||
1510 | RADEON_WRITE(RADEON_SURFACE_CNTL, 0); | ||
1511 | if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS600) | ||
1512 | ret = r600_page_table_init(dev); | ||
1513 | else | ||
1514 | ret = drm_ati_pcigart_init(dev, &dev_priv->gart_info); | ||
1515 | RADEON_WRITE(RADEON_SURFACE_CNTL, sctrl); | ||
1516 | |||
1517 | if (!ret) { | ||
1518 | DRM_ERROR("failed to init PCI GART!\n"); | ||
1519 | radeon_do_cleanup_cp(dev); | ||
1520 | return -ENOMEM; | ||
1521 | } | ||
1522 | |||
1523 | ret = radeon_setup_pcigart_surface(dev_priv); | ||
1524 | if (ret) { | ||
1525 | DRM_ERROR("failed to setup GART surface!\n"); | ||
1526 | if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS600) | ||
1527 | r600_page_table_cleanup(dev, &dev_priv->gart_info); | ||
1528 | else | ||
1529 | drm_ati_pcigart_cleanup(dev, &dev_priv->gart_info); | ||
1530 | radeon_do_cleanup_cp(dev); | ||
1531 | return ret; | ||
1532 | } | ||
1533 | |||
1534 | /* Turn on PCI GART */ | ||
1535 | radeon_set_pcigart(dev_priv, 1); | ||
1536 | } | ||
1537 | |||
1538 | if (!dev_priv->me_fw) { | ||
1539 | int err = radeon_cp_init_microcode(dev_priv); | ||
1540 | if (err) { | ||
1541 | DRM_ERROR("Failed to load firmware!\n"); | ||
1542 | radeon_do_cleanup_cp(dev); | ||
1543 | return err; | ||
1544 | } | ||
1545 | } | ||
1546 | radeon_cp_load_microcode(dev_priv); | ||
1547 | radeon_cp_init_ring_buffer(dev, dev_priv, file_priv); | ||
1548 | |||
1549 | dev_priv->last_buf = 0; | ||
1550 | |||
1551 | radeon_do_engine_reset(dev); | ||
1552 | radeon_test_writeback(dev_priv); | ||
1553 | |||
1554 | return 0; | ||
1555 | } | ||
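
The front/back/depth pitch_offset words built in the function above pack two fields into one value: the pitch in 64-byte units above bit 22 and the surface's card address in 1-KiB units in the low bits. A minimal standalone sketch of that packing, using made-up surface numbers rather than anything taken from the driver:

#include <stdint.h>
#include <stdio.h>

/* Illustrative packing only: pitch in 64-byte units above bit 22, the
 * surface's card address in 1-KiB units in the low bits, matching the
 * expressions used for the front/back/depth buffers above. */
static uint32_t pack_pitch_offset(uint32_t pitch, uint32_t offset,
                                  uint32_t fb_location)
{
        return ((pitch / 64) << 22) | ((offset + fb_location) >> 10);
}

int main(void)
{
        /* hypothetical numbers: 1280x1024 at 32 bpp, surface at the start
         * of a framebuffer the card sees at 0xe0000000 */
        uint32_t v = pack_pitch_offset(1280 * 4, 0, 0xe0000000u);

        printf("pitch_offset = 0x%08x\n", v);
        return 0;
}
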
1556 | |||
1557 | static int radeon_do_cleanup_cp(struct drm_device * dev) | ||
1558 | { | ||
1559 | drm_radeon_private_t *dev_priv = dev->dev_private; | ||
1560 | DRM_DEBUG("\n"); | ||
1561 | |||
1562 | /* Make sure interrupts are disabled here because the uninstall ioctl | ||
1563 | * may not have been called from userspace, and once dev_private | ||
1564 | * is freed, it's too late. | ||
1565 | */ | ||
1566 | if (dev->irq_enabled) | ||
1567 | drm_irq_uninstall(dev); | ||
1568 | |||
1569 | #if IS_ENABLED(CONFIG_AGP) | ||
1570 | if (dev_priv->flags & RADEON_IS_AGP) { | ||
1571 | if (dev_priv->cp_ring != NULL) { | ||
1572 | drm_legacy_ioremapfree(dev_priv->cp_ring, dev); | ||
1573 | dev_priv->cp_ring = NULL; | ||
1574 | } | ||
1575 | if (dev_priv->ring_rptr != NULL) { | ||
1576 | drm_legacy_ioremapfree(dev_priv->ring_rptr, dev); | ||
1577 | dev_priv->ring_rptr = NULL; | ||
1578 | } | ||
1579 | if (dev->agp_buffer_map != NULL) { | ||
1580 | drm_legacy_ioremapfree(dev->agp_buffer_map, dev); | ||
1581 | dev->agp_buffer_map = NULL; | ||
1582 | } | ||
1583 | } else | ||
1584 | #endif | ||
1585 | { | ||
1586 | |||
1587 | if (dev_priv->gart_info.bus_addr) { | ||
1588 | /* Turn off PCI GART */ | ||
1589 | radeon_set_pcigart(dev_priv, 0); | ||
1590 | if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS600) | ||
1591 | r600_page_table_cleanup(dev, &dev_priv->gart_info); | ||
1592 | else { | ||
1593 | if (!drm_ati_pcigart_cleanup(dev, &dev_priv->gart_info)) | ||
1594 | DRM_ERROR("failed to cleanup PCI GART!\n"); | ||
1595 | } | ||
1596 | } | ||
1597 | |||
1598 | if (dev_priv->gart_info.gart_table_location == DRM_ATI_GART_FB) | ||
1599 | { | ||
1600 | drm_legacy_ioremapfree(&dev_priv->gart_info.mapping, dev); | ||
1601 | dev_priv->gart_info.addr = NULL; | ||
1602 | } | ||
1603 | } | ||
1604 | /* only clear to the start of flags */ | ||
1605 | memset(dev_priv, 0, offsetof(drm_radeon_private_t, flags)); | ||
1606 | |||
1607 | return 0; | ||
1608 | } | ||
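
The memset() at the end of the cleanup above deliberately clears only up to offsetof(..., flags), so the probe-time flags word set in radeon_driver_load() (and anything laid out after it) survives a CP teardown and re-init. A tiny self-contained illustration of the same offsetof idiom, using a made-up structure rather than drm_radeon_private_t:

#include <stddef.h>
#include <stdio.h>
#include <string.h>

/* Hypothetical stand-in for drm_radeon_private_t: fields before 'flags'
 * are per-init state, 'flags' is set once at driver load. */
struct demo_priv {
        int cp_running;
        int last_buf;
        unsigned int flags;
};

int main(void)
{
        struct demo_priv p = { .cp_running = 1, .last_buf = 7, .flags = 0x12 };

        /* same idiom as the cleanup above: clear only up to 'flags' */
        memset(&p, 0, offsetof(struct demo_priv, flags));

        printf("cp_running=%d last_buf=%d flags=0x%x\n",
               p.cp_running, p.last_buf, p.flags);
        return 0;
}
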
1609 | |||
1610 | /* This code will reinit the Radeon CP hardware after a resume from disk. | ||
1611 | * AFAIK, it would be very difficult to pickle the state at suspend time, so | ||
1612 | * here we make sure that all Radeon hardware initialisation is re-done without | ||
1613 | * affecting running applications. | ||
1614 | * | ||
1615 | * Charl P. Botha <http://cpbotha.net> | ||
1616 | */ | ||
1617 | static int radeon_do_resume_cp(struct drm_device *dev, struct drm_file *file_priv) | ||
1618 | { | ||
1619 | drm_radeon_private_t *dev_priv = dev->dev_private; | ||
1620 | |||
1621 | if (!dev_priv) { | ||
1622 | DRM_ERROR("Called with no initialization\n"); | ||
1623 | return -EINVAL; | ||
1624 | } | ||
1625 | |||
1626 | DRM_DEBUG("Starting radeon_do_resume_cp()\n"); | ||
1627 | |||
1628 | #if IS_ENABLED(CONFIG_AGP) | ||
1629 | if (dev_priv->flags & RADEON_IS_AGP) { | ||
1630 | /* Turn off PCI GART */ | ||
1631 | radeon_set_pcigart(dev_priv, 0); | ||
1632 | } else | ||
1633 | #endif | ||
1634 | { | ||
1635 | /* Turn on PCI GART */ | ||
1636 | radeon_set_pcigart(dev_priv, 1); | ||
1637 | } | ||
1638 | |||
1639 | radeon_cp_load_microcode(dev_priv); | ||
1640 | radeon_cp_init_ring_buffer(dev, dev_priv, file_priv); | ||
1641 | |||
1642 | dev_priv->have_z_offset = 0; | ||
1643 | radeon_do_engine_reset(dev); | ||
1644 | radeon_irq_set_state(dev, RADEON_SW_INT_ENABLE, 1); | ||
1645 | |||
1646 | DRM_DEBUG("radeon_do_resume_cp() complete\n"); | ||
1647 | |||
1648 | return 0; | ||
1649 | } | ||
1650 | |||
1651 | int radeon_cp_init(struct drm_device *dev, void *data, struct drm_file *file_priv) | ||
1652 | { | ||
1653 | drm_radeon_private_t *dev_priv = dev->dev_private; | ||
1654 | drm_radeon_init_t *init = data; | ||
1655 | |||
1656 | LOCK_TEST_WITH_RETURN(dev, file_priv); | ||
1657 | |||
1658 | if (init->func == RADEON_INIT_R300_CP) | ||
1659 | r300_init_reg_flags(dev); | ||
1660 | |||
1661 | switch (init->func) { | ||
1662 | case RADEON_INIT_CP: | ||
1663 | case RADEON_INIT_R200_CP: | ||
1664 | case RADEON_INIT_R300_CP: | ||
1665 | return radeon_do_init_cp(dev, init, file_priv); | ||
1666 | case RADEON_INIT_R600_CP: | ||
1667 | return r600_do_init_cp(dev, init, file_priv); | ||
1668 | case RADEON_CLEANUP_CP: | ||
1669 | if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R600) | ||
1670 | return r600_do_cleanup_cp(dev); | ||
1671 | else | ||
1672 | return radeon_do_cleanup_cp(dev); | ||
1673 | } | ||
1674 | |||
1675 | return -EINVAL; | ||
1676 | } | ||
1677 | |||
1678 | int radeon_cp_start(struct drm_device *dev, void *data, struct drm_file *file_priv) | ||
1679 | { | ||
1680 | drm_radeon_private_t *dev_priv = dev->dev_private; | ||
1681 | DRM_DEBUG("\n"); | ||
1682 | |||
1683 | LOCK_TEST_WITH_RETURN(dev, file_priv); | ||
1684 | |||
1685 | if (dev_priv->cp_running) { | ||
1686 | DRM_DEBUG("while CP running\n"); | ||
1687 | return 0; | ||
1688 | } | ||
1689 | if (dev_priv->cp_mode == RADEON_CSQ_PRIDIS_INDDIS) { | ||
1690 | DRM_DEBUG("called with bogus CP mode (%d)\n", | ||
1691 | dev_priv->cp_mode); | ||
1692 | return 0; | ||
1693 | } | ||
1694 | |||
1695 | if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R600) | ||
1696 | r600_do_cp_start(dev_priv); | ||
1697 | else | ||
1698 | radeon_do_cp_start(dev_priv); | ||
1699 | |||
1700 | return 0; | ||
1701 | } | ||
1702 | |||
1703 | /* Stop the CP. The engine must have been idled before calling this | ||
1704 | * routine. | ||
1705 | */ | ||
1706 | int radeon_cp_stop(struct drm_device *dev, void *data, struct drm_file *file_priv) | ||
1707 | { | ||
1708 | drm_radeon_private_t *dev_priv = dev->dev_private; | ||
1709 | drm_radeon_cp_stop_t *stop = data; | ||
1710 | int ret; | ||
1711 | DRM_DEBUG("\n"); | ||
1712 | |||
1713 | LOCK_TEST_WITH_RETURN(dev, file_priv); | ||
1714 | |||
1715 | if (!dev_priv->cp_running) | ||
1716 | return 0; | ||
1717 | |||
1718 | /* Flush any pending CP commands. This ensures any outstanding | ||
1719 | * commands are executed by the engine before we turn it off. | ||
1720 | */ | ||
1721 | if (stop->flush) { | ||
1722 | radeon_do_cp_flush(dev_priv); | ||
1723 | } | ||
1724 | |||
1725 | /* If we fail to make the engine go idle, we return an error | ||
1726 | * code so that the DRM ioctl wrapper can try again. | ||
1727 | */ | ||
1728 | if (stop->idle) { | ||
1729 | if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R600) | ||
1730 | ret = r600_do_cp_idle(dev_priv); | ||
1731 | else | ||
1732 | ret = radeon_do_cp_idle(dev_priv); | ||
1733 | if (ret) | ||
1734 | return ret; | ||
1735 | } | ||
1736 | |||
1737 | /* Finally, we can turn off the CP. If the engine isn't idle, | ||
1738 | * we will get some dropped triangles as they won't be fully | ||
1739 | * rendered before the CP is shut down. | ||
1740 | */ | ||
1741 | if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R600) | ||
1742 | r600_do_cp_stop(dev_priv); | ||
1743 | else | ||
1744 | radeon_do_cp_stop(dev_priv); | ||
1745 | |||
1746 | /* Reset the engine */ | ||
1747 | if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R600) | ||
1748 | r600_do_engine_reset(dev); | ||
1749 | else | ||
1750 | radeon_do_engine_reset(dev); | ||
1751 | |||
1752 | return 0; | ||
1753 | } | ||
1754 | |||
1755 | void radeon_do_release(struct drm_device * dev) | ||
1756 | { | ||
1757 | drm_radeon_private_t *dev_priv = dev->dev_private; | ||
1758 | int i, ret; | ||
1759 | |||
1760 | if (dev_priv) { | ||
1761 | if (dev_priv->cp_running) { | ||
1762 | /* Stop the cp */ | ||
1763 | if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R600) { | ||
1764 | while ((ret = r600_do_cp_idle(dev_priv)) != 0) { | ||
1765 | DRM_DEBUG("radeon_do_cp_idle %d\n", ret); | ||
1766 | #ifdef __linux__ | ||
1767 | schedule(); | ||
1768 | #else | ||
1769 | tsleep(&ret, PZERO, "rdnrel", 1); | ||
1770 | #endif | ||
1771 | } | ||
1772 | } else { | ||
1773 | while ((ret = radeon_do_cp_idle(dev_priv)) != 0) { | ||
1774 | DRM_DEBUG("radeon_do_cp_idle %d\n", ret); | ||
1775 | #ifdef __linux__ | ||
1776 | schedule(); | ||
1777 | #else | ||
1778 | tsleep(&ret, PZERO, "rdnrel", 1); | ||
1779 | #endif | ||
1780 | } | ||
1781 | } | ||
1782 | if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R600) { | ||
1783 | r600_do_cp_stop(dev_priv); | ||
1784 | r600_do_engine_reset(dev); | ||
1785 | } else { | ||
1786 | radeon_do_cp_stop(dev_priv); | ||
1787 | radeon_do_engine_reset(dev); | ||
1788 | } | ||
1789 | } | ||
1790 | |||
1791 | if ((dev_priv->flags & RADEON_FAMILY_MASK) < CHIP_R600) { | ||
1792 | /* Disable *all* interrupts */ | ||
1793 | if (dev_priv->mmio) /* remove this after permanent addmaps */ | ||
1794 | RADEON_WRITE(RADEON_GEN_INT_CNTL, 0); | ||
1795 | |||
1796 | if (dev_priv->mmio) { /* remove all surfaces */ | ||
1797 | for (i = 0; i < RADEON_MAX_SURFACES; i++) { | ||
1798 | RADEON_WRITE(RADEON_SURFACE0_INFO + 16 * i, 0); | ||
1799 | RADEON_WRITE(RADEON_SURFACE0_LOWER_BOUND + | ||
1800 | 16 * i, 0); | ||
1801 | RADEON_WRITE(RADEON_SURFACE0_UPPER_BOUND + | ||
1802 | 16 * i, 0); | ||
1803 | } | ||
1804 | } | ||
1805 | } | ||
1806 | |||
1807 | /* Free memory heap structures */ | ||
1808 | radeon_mem_takedown(&(dev_priv->gart_heap)); | ||
1809 | radeon_mem_takedown(&(dev_priv->fb_heap)); | ||
1810 | |||
1811 | /* deallocate kernel resources */ | ||
1812 | if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R600) | ||
1813 | r600_do_cleanup_cp(dev); | ||
1814 | else | ||
1815 | radeon_do_cleanup_cp(dev); | ||
1816 | release_firmware(dev_priv->me_fw); | ||
1817 | dev_priv->me_fw = NULL; | ||
1818 | release_firmware(dev_priv->pfp_fw); | ||
1819 | dev_priv->pfp_fw = NULL; | ||
1820 | } | ||
1821 | } | ||
1822 | |||
1823 | /* Just reset the CP ring. Called as part of an X Server engine reset. | ||
1824 | */ | ||
1825 | int radeon_cp_reset(struct drm_device *dev, void *data, struct drm_file *file_priv) | ||
1826 | { | ||
1827 | drm_radeon_private_t *dev_priv = dev->dev_private; | ||
1828 | DRM_DEBUG("\n"); | ||
1829 | |||
1830 | LOCK_TEST_WITH_RETURN(dev, file_priv); | ||
1831 | |||
1832 | if (!dev_priv) { | ||
1833 | DRM_DEBUG("called before init done\n"); | ||
1834 | return -EINVAL; | ||
1835 | } | ||
1836 | |||
1837 | if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R600) | ||
1838 | r600_do_cp_reset(dev_priv); | ||
1839 | else | ||
1840 | radeon_do_cp_reset(dev_priv); | ||
1841 | |||
1842 | /* The CP is no longer running after an engine reset */ | ||
1843 | dev_priv->cp_running = 0; | ||
1844 | |||
1845 | return 0; | ||
1846 | } | ||
1847 | |||
1848 | int radeon_cp_idle(struct drm_device *dev, void *data, struct drm_file *file_priv) | ||
1849 | { | ||
1850 | drm_radeon_private_t *dev_priv = dev->dev_private; | ||
1851 | DRM_DEBUG("\n"); | ||
1852 | |||
1853 | LOCK_TEST_WITH_RETURN(dev, file_priv); | ||
1854 | |||
1855 | if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R600) | ||
1856 | return r600_do_cp_idle(dev_priv); | ||
1857 | else | ||
1858 | return radeon_do_cp_idle(dev_priv); | ||
1859 | } | ||
1860 | |||
1861 | /* Added by Charl P. Botha to call radeon_do_resume_cp(). | ||
1862 | */ | ||
1863 | int radeon_cp_resume(struct drm_device *dev, void *data, struct drm_file *file_priv) | ||
1864 | { | ||
1865 | drm_radeon_private_t *dev_priv = dev->dev_private; | ||
1866 | DRM_DEBUG("\n"); | ||
1867 | |||
1868 | if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R600) | ||
1869 | return r600_do_resume_cp(dev, file_priv); | ||
1870 | else | ||
1871 | return radeon_do_resume_cp(dev, file_priv); | ||
1872 | } | ||
1873 | |||
1874 | int radeon_engine_reset(struct drm_device *dev, void *data, struct drm_file *file_priv) | ||
1875 | { | ||
1876 | drm_radeon_private_t *dev_priv = dev->dev_private; | ||
1877 | DRM_DEBUG("\n"); | ||
1878 | |||
1879 | LOCK_TEST_WITH_RETURN(dev, file_priv); | ||
1880 | |||
1881 | if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R600) | ||
1882 | return r600_do_engine_reset(dev); | ||
1883 | else | ||
1884 | return radeon_do_engine_reset(dev); | ||
1885 | } | ||
1886 | |||
1887 | /* ================================================================ | ||
1888 | * Fullscreen mode | ||
1889 | */ | ||
1890 | |||
1891 | /* KW: Deprecated to say the least: | ||
1892 | */ | ||
1893 | int radeon_fullscreen(struct drm_device *dev, void *data, struct drm_file *file_priv) | ||
1894 | { | ||
1895 | return 0; | ||
1896 | } | ||
1897 | |||
1898 | /* ================================================================ | ||
1899 | * Freelist management | ||
1900 | */ | ||
1901 | |||
1902 | /* Original comment: FIXME: ROTATE_BUFS is a hack to cycle through | ||
1903 | * bufs until freelist code is used. Note this hides a problem with | ||
1904 | * the scratch register (used to keep track of last buffer | ||
1905 | * completed) being written to before the last buffer has actually | ||
1906 | * completed rendering. | ||
1907 | * | ||
1908 | * KW: It's also a good way to find free buffers quickly. | ||
1909 | * | ||
1910 | * KW: Ideally this loop wouldn't exist, and freelist_get wouldn't | ||
1911 | * sleep. However, bugs in older versions of radeon_accel.c mean that | ||
1912 | * we essentially have to do this, else old clients will break. | ||
1913 | * | ||
1914 | * However, it does leave open a potential deadlock where all the | ||
1915 | * buffers are held by other clients, which can't release them because | ||
1916 | * they can't get the lock. | ||
1917 | */ | ||
1918 | |||
1919 | struct drm_buf *radeon_freelist_get(struct drm_device * dev) | ||
1920 | { | ||
1921 | struct drm_device_dma *dma = dev->dma; | ||
1922 | drm_radeon_private_t *dev_priv = dev->dev_private; | ||
1923 | drm_radeon_buf_priv_t *buf_priv; | ||
1924 | struct drm_buf *buf; | ||
1925 | int i, t; | ||
1926 | int start; | ||
1927 | |||
1928 | if (++dev_priv->last_buf >= dma->buf_count) | ||
1929 | dev_priv->last_buf = 0; | ||
1930 | |||
1931 | start = dev_priv->last_buf; | ||
1932 | |||
1933 | for (t = 0; t < dev_priv->usec_timeout; t++) { | ||
1934 | u32 done_age = GET_SCRATCH(dev_priv, 1); | ||
1935 | DRM_DEBUG("done_age = %d\n", done_age); | ||
1936 | for (i = 0; i < dma->buf_count; i++) { | ||
1937 | buf = dma->buflist[start]; | ||
1938 | buf_priv = buf->dev_private; | ||
1939 | if (buf->file_priv == NULL || (buf->pending && | ||
1940 | buf_priv->age <= | ||
1941 | done_age)) { | ||
1942 | dev_priv->stats.requested_bufs++; | ||
1943 | buf->pending = 0; | ||
1944 | return buf; | ||
1945 | } | ||
1946 | if (++start >= dma->buf_count) | ||
1947 | start = 0; | ||
1948 | } | ||
1949 | |||
1950 | if (t) { | ||
1951 | DRM_UDELAY(1); | ||
1952 | dev_priv->stats.freelist_loops++; | ||
1953 | } | ||
1954 | } | ||
1955 | |||
1956 | return NULL; | ||
1957 | } | ||
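
For reference, the freelist check above relies on an age counter: each buffer is stamped with an age when it is submitted, the CP writes the age of the most recently completed buffer back into scratch register 1, and a pending buffer becomes reusable once that written-back age has caught up with its stamp. A simplified, self-contained model of just the age comparison (toy structures and invented values, not the driver's own; the real test also looks at buf->file_priv):

#include <stdint.h>
#include <stdio.h>

/* Toy model of the age test above: a pending buffer is reusable once
 * done_age -- the age the CP last wrote back -- has reached the age
 * stamped on the buffer. */
struct toy_buf {
        uint32_t age;
        int pending;
};

static int toy_buf_reusable(const struct toy_buf *b, uint32_t done_age)
{
        return !b->pending || b->age <= done_age;
}

int main(void)
{
        struct toy_buf bufs[3] = {
                { .age = 10, .pending = 1 },
                { .age = 12, .pending = 1 },
                { .age = 0,  .pending = 0 },
        };
        uint32_t done_age = 11;  /* stands in for GET_SCRATCH(dev_priv, 1) */
        int i;

        for (i = 0; i < 3; i++)
                printf("buf %d reusable: %d\n", i,
                       toy_buf_reusable(&bufs[i], done_age));
        return 0;
}
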
1958 | |||
1959 | void radeon_freelist_reset(struct drm_device * dev) | ||
1960 | { | ||
1961 | struct drm_device_dma *dma = dev->dma; | ||
1962 | drm_radeon_private_t *dev_priv = dev->dev_private; | ||
1963 | int i; | ||
1964 | |||
1965 | dev_priv->last_buf = 0; | ||
1966 | for (i = 0; i < dma->buf_count; i++) { | ||
1967 | struct drm_buf *buf = dma->buflist[i]; | ||
1968 | drm_radeon_buf_priv_t *buf_priv = buf->dev_private; | ||
1969 | buf_priv->age = 0; | ||
1970 | } | ||
1971 | } | ||
1972 | |||
1973 | /* ================================================================ | ||
1974 | * CP command submission | ||
1975 | */ | ||
1976 | |||
1977 | int radeon_wait_ring(drm_radeon_private_t * dev_priv, int n) | ||
1978 | { | ||
1979 | drm_radeon_ring_buffer_t *ring = &dev_priv->ring; | ||
1980 | int i; | ||
1981 | u32 last_head = GET_RING_HEAD(dev_priv); | ||
1982 | |||
1983 | for (i = 0; i < dev_priv->usec_timeout; i++) { | ||
1984 | u32 head = GET_RING_HEAD(dev_priv); | ||
1985 | |||
1986 | ring->space = (head - ring->tail) * sizeof(u32); | ||
1987 | if (ring->space <= 0) | ||
1988 | ring->space += ring->size; | ||
1989 | if (ring->space > n) | ||
1990 | return 0; | ||
1991 | |||
1992 | dev_priv->stats.boxes |= RADEON_BOX_WAIT_IDLE; | ||
1993 | |||
1994 | if (head != last_head) | ||
1995 | i = 0; | ||
1996 | last_head = head; | ||
1997 | |||
1998 | DRM_UDELAY(1); | ||
1999 | } | ||
2000 | |||
2001 | /* FIXME: This return value is ignored in the BEGIN_RING macro! */ | ||
2002 | #if RADEON_FIFO_DEBUG | ||
2003 | radeon_status(dev_priv); | ||
2004 | DRM_ERROR("failed!\n"); | ||
2005 | #endif | ||
2006 | return -EBUSY; | ||
2007 | } | ||
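
The space calculation above treats head and tail as dword indices and wraps the difference by the ring size whenever it comes out non-positive. The same arithmetic, pulled out into a standalone snippet with invented head/tail values:

#include <stdint.h>
#include <stdio.h>

/* Same free-space arithmetic as radeon_wait_ring(): head and tail are
 * dword indices; a non-positive difference means the head has wrapped,
 * so the ring size is added back in. */
static int ring_space(uint32_t head, uint32_t tail, int ring_size_bytes)
{
        int space = (int)(head - tail) * (int)sizeof(uint32_t);

        if (space <= 0)
                space += ring_size_bytes;
        return space;
}

int main(void)
{
        int size = 64 * 1024;   /* a 64 KiB ring, chosen for the example */

        printf("%d\n", ring_space(100, 40, size));  /* head ahead of tail */
        printf("%d\n", ring_space(40, 100, size));  /* head has wrapped   */
        return 0;
}
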
2008 | |||
2009 | static int radeon_cp_get_buffers(struct drm_device *dev, | ||
2010 | struct drm_file *file_priv, | ||
2011 | struct drm_dma * d) | ||
2012 | { | ||
2013 | int i; | ||
2014 | struct drm_buf *buf; | ||
2015 | |||
2016 | for (i = d->granted_count; i < d->request_count; i++) { | ||
2017 | buf = radeon_freelist_get(dev); | ||
2018 | if (!buf) | ||
2019 | return -EBUSY; /* NOTE: broken client */ | ||
2020 | |||
2021 | buf->file_priv = file_priv; | ||
2022 | |||
2023 | if (copy_to_user(&d->request_indices[i], &buf->idx, | ||
2024 | sizeof(buf->idx))) | ||
2025 | return -EFAULT; | ||
2026 | if (copy_to_user(&d->request_sizes[i], &buf->total, | ||
2027 | sizeof(buf->total))) | ||
2028 | return -EFAULT; | ||
2029 | |||
2030 | d->granted_count++; | ||
2031 | } | ||
2032 | return 0; | ||
2033 | } | ||
2034 | |||
2035 | int radeon_cp_buffers(struct drm_device *dev, void *data, struct drm_file *file_priv) | ||
2036 | { | ||
2037 | struct drm_device_dma *dma = dev->dma; | ||
2038 | int ret = 0; | ||
2039 | struct drm_dma *d = data; | ||
2040 | |||
2041 | LOCK_TEST_WITH_RETURN(dev, file_priv); | ||
2042 | |||
2043 | /* Please don't send us buffers. | ||
2044 | */ | ||
2045 | if (d->send_count != 0) { | ||
2046 | DRM_ERROR("Process %d trying to send %d buffers via drmDMA\n", | ||
2047 | DRM_CURRENTPID, d->send_count); | ||
2048 | return -EINVAL; | ||
2049 | } | ||
2050 | |||
2051 | /* We'll send you buffers. | ||
2052 | */ | ||
2053 | if (d->request_count < 0 || d->request_count > dma->buf_count) { | ||
2054 | DRM_ERROR("Process %d trying to get %d buffers (of %d max)\n", | ||
2055 | DRM_CURRENTPID, d->request_count, dma->buf_count); | ||
2056 | return -EINVAL; | ||
2057 | } | ||
2058 | |||
2059 | d->granted_count = 0; | ||
2060 | |||
2061 | if (d->request_count) { | ||
2062 | ret = radeon_cp_get_buffers(dev, file_priv, d); | ||
2063 | } | ||
2064 | |||
2065 | return ret; | ||
2066 | } | ||
2067 | |||
2068 | int radeon_driver_load(struct drm_device *dev, unsigned long flags) | ||
2069 | { | ||
2070 | drm_radeon_private_t *dev_priv; | ||
2071 | int ret = 0; | ||
2072 | |||
2073 | dev_priv = kzalloc(sizeof(drm_radeon_private_t), GFP_KERNEL); | ||
2074 | if (dev_priv == NULL) | ||
2075 | return -ENOMEM; | ||
2076 | |||
2077 | dev->dev_private = (void *)dev_priv; | ||
2078 | dev_priv->flags = flags; | ||
2079 | |||
2080 | switch (flags & RADEON_FAMILY_MASK) { | ||
2081 | case CHIP_R100: | ||
2082 | case CHIP_RV200: | ||
2083 | case CHIP_R200: | ||
2084 | case CHIP_R300: | ||
2085 | case CHIP_R350: | ||
2086 | case CHIP_R420: | ||
2087 | case CHIP_R423: | ||
2088 | case CHIP_RV410: | ||
2089 | case CHIP_RV515: | ||
2090 | case CHIP_R520: | ||
2091 | case CHIP_RV570: | ||
2092 | case CHIP_R580: | ||
2093 | dev_priv->flags |= RADEON_HAS_HIERZ; | ||
2094 | break; | ||
2095 | default: | ||
2096 | /* all other chips have no hierarchical z buffer */ | ||
2097 | break; | ||
2098 | } | ||
2099 | |||
2100 | pci_set_master(dev->pdev); | ||
2101 | |||
2102 | if (drm_pci_device_is_agp(dev)) | ||
2103 | dev_priv->flags |= RADEON_IS_AGP; | ||
2104 | else if (pci_is_pcie(dev->pdev)) | ||
2105 | dev_priv->flags |= RADEON_IS_PCIE; | ||
2106 | else | ||
2107 | dev_priv->flags |= RADEON_IS_PCI; | ||
2108 | |||
2109 | ret = drm_legacy_addmap(dev, pci_resource_start(dev->pdev, 2), | ||
2110 | pci_resource_len(dev->pdev, 2), _DRM_REGISTERS, | ||
2111 | _DRM_READ_ONLY | _DRM_DRIVER, &dev_priv->mmio); | ||
2112 | if (ret != 0) | ||
2113 | return ret; | ||
2114 | |||
2115 | ret = drm_vblank_init(dev, 2); | ||
2116 | if (ret) { | ||
2117 | radeon_driver_unload(dev); | ||
2118 | return ret; | ||
2119 | } | ||
2120 | |||
2121 | DRM_DEBUG("%s card detected\n", | ||
2122 | ((dev_priv->flags & RADEON_IS_AGP) ? "AGP" : (((dev_priv->flags & RADEON_IS_PCIE) ? "PCIE" : "PCI")))); | ||
2123 | return ret; | ||
2124 | } | ||
2125 | |||
2126 | int radeon_master_create(struct drm_device *dev, struct drm_master *master) | ||
2127 | { | ||
2128 | struct drm_radeon_master_private *master_priv; | ||
2129 | unsigned long sareapage; | ||
2130 | int ret; | ||
2131 | |||
2132 | master_priv = kzalloc(sizeof(*master_priv), GFP_KERNEL); | ||
2133 | if (!master_priv) | ||
2134 | return -ENOMEM; | ||
2135 | |||
2136 | /* prebuild the SAREA */ | ||
2137 | sareapage = max_t(unsigned long, SAREA_MAX, PAGE_SIZE); | ||
2138 | ret = drm_legacy_addmap(dev, 0, sareapage, _DRM_SHM, _DRM_CONTAINS_LOCK, | ||
2139 | &master_priv->sarea); | ||
2140 | if (ret) { | ||
2141 | DRM_ERROR("SAREA setup failed\n"); | ||
2142 | kfree(master_priv); | ||
2143 | return ret; | ||
2144 | } | ||
2145 | master_priv->sarea_priv = master_priv->sarea->handle + sizeof(struct drm_sarea); | ||
2146 | master_priv->sarea_priv->pfCurrentPage = 0; | ||
2147 | |||
2148 | master->driver_priv = master_priv; | ||
2149 | return 0; | ||
2150 | } | ||
2151 | |||
2152 | void radeon_master_destroy(struct drm_device *dev, struct drm_master *master) | ||
2153 | { | ||
2154 | struct drm_radeon_master_private *master_priv = master->driver_priv; | ||
2155 | |||
2156 | if (!master_priv) | ||
2157 | return; | ||
2158 | |||
2159 | if (master_priv->sarea_priv && | ||
2160 | master_priv->sarea_priv->pfCurrentPage != 0) | ||
2161 | radeon_cp_dispatch_flip(dev, master); | ||
2162 | |||
2163 | master_priv->sarea_priv = NULL; | ||
2164 | if (master_priv->sarea) | ||
2165 | drm_legacy_rmmap_locked(dev, master_priv->sarea); | ||
2166 | |||
2167 | kfree(master_priv); | ||
2168 | |||
2169 | master->driver_priv = NULL; | ||
2170 | } | ||
2171 | |||
2172 | /* Create mappings for registers and framebuffer so userland doesn't necessarily | ||
2173 | * have to find them. | ||
2174 | */ | ||
2175 | int radeon_driver_firstopen(struct drm_device *dev) | ||
2176 | { | ||
2177 | int ret; | ||
2178 | drm_local_map_t *map; | ||
2179 | drm_radeon_private_t *dev_priv = dev->dev_private; | ||
2180 | |||
2181 | dev_priv->gart_info.table_size = RADEON_PCIGART_TABLE_SIZE; | ||
2182 | |||
2183 | dev_priv->fb_aper_offset = pci_resource_start(dev->pdev, 0); | ||
2184 | ret = drm_legacy_addmap(dev, dev_priv->fb_aper_offset, | ||
2185 | pci_resource_len(dev->pdev, 0), | ||
2186 | _DRM_FRAME_BUFFER, _DRM_WRITE_COMBINING, &map); | ||
2187 | if (ret != 0) | ||
2188 | return ret; | ||
2189 | |||
2190 | return 0; | ||
2191 | } | ||
2192 | |||
2193 | int radeon_driver_unload(struct drm_device *dev) | ||
2194 | { | ||
2195 | drm_radeon_private_t *dev_priv = dev->dev_private; | ||
2196 | |||
2197 | DRM_DEBUG("\n"); | ||
2198 | |||
2199 | drm_legacy_rmmap(dev, dev_priv->mmio); | ||
2200 | |||
2201 | kfree(dev_priv); | ||
2202 | |||
2203 | dev->dev_private = NULL; | ||
2204 | return 0; | ||
2205 | } | ||
2206 | |||
2207 | void radeon_commit_ring(drm_radeon_private_t *dev_priv) | ||
2208 | { | ||
2209 | int i; | ||
2210 | u32 *ring; | ||
2211 | int tail_aligned; | ||
2212 | |||
2213 | /* check if the ring is padded out to 16-dword alignment */ | ||
2214 | |||
2215 | tail_aligned = dev_priv->ring.tail & (RADEON_RING_ALIGN-1); | ||
2216 | if (tail_aligned) { | ||
2217 | int num_p2 = RADEON_RING_ALIGN - tail_aligned; | ||
2218 | |||
2219 | ring = dev_priv->ring.start; | ||
2220 | /* pad with some CP_PACKET2 */ | ||
2221 | for (i = 0; i < num_p2; i++) | ||
2222 | ring[dev_priv->ring.tail + i] = CP_PACKET2(); | ||
2223 | |||
2224 | dev_priv->ring.tail += i; | ||
2225 | |||
2226 | dev_priv->ring.space -= num_p2 * sizeof(u32); | ||
2227 | } | ||
2228 | |||
2229 | dev_priv->ring.tail &= dev_priv->ring.tail_mask; | ||
2230 | |||
2231 | mb(); | ||
2232 | GET_RING_HEAD(dev_priv); | ||
2233 | |||
2234 | if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R600) { | ||
2235 | RADEON_WRITE(R600_CP_RB_WPTR, dev_priv->ring.tail); | ||
2236 | /* read from PCI bus to ensure correct posting */ | ||
2237 | RADEON_READ(R600_CP_RB_RPTR); | ||
2238 | } else { | ||
2239 | RADEON_WRITE(RADEON_CP_RB_WPTR, dev_priv->ring.tail); | ||
2240 | /* read from PCI bus to ensure correct posting */ | ||
2241 | RADEON_READ(RADEON_CP_RB_RPTR); | ||
2242 | } | ||
2243 | } | ||
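
For clarity, the padding logic in radeon_commit_ring() works on the dword tail index: if the tail is not a multiple of RADEON_RING_ALIGN (16 dwords, per the comment above), it appends enough CP_PACKET2 filler dwords to reach the next boundary before the write pointer is committed. A standalone sketch of just that arithmetic, with a made-up tail value:

#include <stdio.h>

#define RING_ALIGN 16   /* dwords; stands in for RADEON_RING_ALIGN */

int main(void)
{
        /* Same arithmetic as above: if the dword tail index is not on a
         * 16-dword boundary, pad with (RING_ALIGN - remainder) filler
         * dwords so the committed write pointer is aligned. */
        int tail = 37;
        int rem = tail & (RING_ALIGN - 1);
        int pad = rem ? RING_ALIGN - rem : 0;

        printf("tail %d -> %d filler dwords -> new tail %d\n",
               tail, pad, tail + pad);
        return 0;
}
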