Diffstat (limited to 'drivers/gpu/drm/radeon/r420.c')
-rw-r--r--  drivers/gpu/drm/radeon/r420.c | 293
1 file changed, 202 insertions, 91 deletions
diff --git a/drivers/gpu/drm/radeon/r420.c b/drivers/gpu/drm/radeon/r420.c
index 551d6996d3f2..e57b9ba4aaf3 100644
--- a/drivers/gpu/drm/radeon/r420.c
+++ b/drivers/gpu/drm/radeon/r420.c
@@ -29,48 +29,13 @@
 #include "drmP.h"
 #include "radeon_reg.h"
 #include "radeon.h"
+#include "atom.h"
 #include "r420d.h"
 
-/* r420,r423,rv410 depends on : */
-void r100_pci_gart_disable(struct radeon_device *rdev);
-void r100_hdp_reset(struct radeon_device *rdev);
-void r100_mc_setup(struct radeon_device *rdev);
-int r100_gui_wait_for_idle(struct radeon_device *rdev);
-void r100_mc_disable_clients(struct radeon_device *rdev);
-void r300_vram_info(struct radeon_device *rdev);
-int r300_mc_wait_for_idle(struct radeon_device *rdev);
-int rv370_pcie_gart_enable(struct radeon_device *rdev);
-void rv370_pcie_gart_disable(struct radeon_device *rdev);
-
-/* This files gather functions specifics to :
- * r420,r423,rv410
- *
- * Some of these functions might be used by newer ASICs.
- */
-void r420_gpu_init(struct radeon_device *rdev);
-int r420_debugfs_pipes_info_init(struct radeon_device *rdev);
-
-
-/*
- * MC
- */
 int r420_mc_init(struct radeon_device *rdev)
 {
 	int r;
 
-	if (r100_debugfs_rbbm_init(rdev)) {
-		DRM_ERROR("Failed to register debugfs file for RBBM !\n");
-	}
-	if (r420_debugfs_pipes_info_init(rdev)) {
-		DRM_ERROR("Failed to register debugfs file for pipes !\n");
-	}
-
-	r420_gpu_init(rdev);
-	r100_pci_gart_disable(rdev);
-	if (rdev->flags & RADEON_IS_PCIE) {
-		rv370_pcie_gart_disable(rdev);
-	}
-
 	/* Setup GPU memory space */
 	rdev->mc.vram_location = 0xFFFFFFFFUL;
 	rdev->mc.gtt_location = 0xFFFFFFFFUL;
@@ -88,38 +53,9 @@ int r420_mc_init(struct radeon_device *rdev)
 	if (r) {
 		return r;
 	}
-
-	/* Program GPU memory space */
-	r100_mc_disable_clients(rdev);
-	if (r300_mc_wait_for_idle(rdev)) {
-		printk(KERN_WARNING "Failed to wait MC idle while "
-		       "programming pipes. Bad things might happen.\n");
-	}
-	r100_mc_setup(rdev);
 	return 0;
 }
 
-void r420_mc_fini(struct radeon_device *rdev)
-{
-	if (rdev->flags & RADEON_IS_PCIE) {
-		rv370_pcie_gart_disable(rdev);
-		radeon_gart_table_vram_free(rdev);
-	} else {
-		r100_pci_gart_disable(rdev);
-		radeon_gart_table_ram_free(rdev);
-	}
-	radeon_gart_fini(rdev);
-}
-
-
-/*
- * Global GPU functions
- */
-void r420_errata(struct radeon_device *rdev)
-{
-	rdev->pll_errata = 0;
-}
-
 void r420_pipes_init(struct radeon_device *rdev)
 {
 	unsigned tmp;
@@ -185,25 +121,216 @@ void r420_pipes_init(struct radeon_device *rdev)
 		 rdev->num_gb_pipes, rdev->num_z_pipes);
 }
 
-void r420_gpu_init(struct radeon_device *rdev)
+u32 r420_mc_rreg(struct radeon_device *rdev, u32 reg)
 {
-	r100_hdp_reset(rdev);
+	u32 r;
+
+	WREG32(R_0001F8_MC_IND_INDEX, S_0001F8_MC_IND_ADDR(reg));
+	r = RREG32(R_0001FC_MC_IND_DATA);
+	return r;
+}
+
+void r420_mc_wreg(struct radeon_device *rdev, u32 reg, u32 v)
+{
+	WREG32(R_0001F8_MC_IND_INDEX, S_0001F8_MC_IND_ADDR(reg) |
+		S_0001F8_MC_IND_WR_EN(1));
+	WREG32(R_0001FC_MC_IND_DATA, v);
+}
+
+static void r420_debugfs(struct radeon_device *rdev)
+{
+	if (r100_debugfs_rbbm_init(rdev)) {
+		DRM_ERROR("Failed to register debugfs file for RBBM !\n");
+	}
+	if (r420_debugfs_pipes_info_init(rdev)) {
+		DRM_ERROR("Failed to register debugfs file for pipes !\n");
+	}
+}
+
+static void r420_clock_resume(struct radeon_device *rdev)
+{
+	u32 sclk_cntl;
+	sclk_cntl = RREG32_PLL(R_00000D_SCLK_CNTL);
+	sclk_cntl |= S_00000D_FORCE_CP(1) | S_00000D_FORCE_VIP(1);
+	if (rdev->family == CHIP_R420)
+		sclk_cntl |= S_00000D_FORCE_PX(1) | S_00000D_FORCE_TX(1);
+	WREG32_PLL(R_00000D_SCLK_CNTL, sclk_cntl);
+}
+
+int r420_resume(struct radeon_device *rdev)
+{
+	int r;
+
+	/* Resume clock before doing reset */
+	r420_clock_resume(rdev);
+	/* Reset gpu before posting otherwise ATOM will enter infinite loop */
+	if (radeon_gpu_reset(rdev)) {
+		dev_warn(rdev->dev, "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
+			RREG32(R_000E40_RBBM_STATUS),
+			RREG32(R_0007C0_CP_STAT));
+	}
+	/* check if cards are posted or not */
+	if (rdev->is_atom_bios) {
+		atom_asic_init(rdev->mode_info.atom_context);
+	} else {
+		radeon_combios_asic_init(rdev->ddev);
+	}
+	/* Resume clock after posting */
+	r420_clock_resume(rdev);
+	r300_mc_program(rdev);
+	/* Initialize GART (initialize after TTM so we can allocate
+	 * memory through TTM but finalize after TTM) */
+	r = radeon_gart_enable(rdev);
+	if (r) {
+		dev_err(rdev->dev, "failled initializing GART (%d).\n", r);
+		return r;
+	}
 	r420_pipes_init(rdev);
-	if (r300_mc_wait_for_idle(rdev)) {
-		printk(KERN_WARNING "Failed to wait MC idle while "
-		       "programming pipes. Bad things might happen.\n");
+	/* Enable IRQ */
+	rdev->irq.sw_int = true;
+	r100_irq_set(rdev);
+	/* 1M ring buffer */
+	r = r100_cp_init(rdev, 1024 * 1024);
+	if (r) {
+		dev_err(rdev->dev, "failled initializing CP (%d).\n", r);
+		return r;
+	}
+	r = r100_wb_init(rdev);
+	if (r) {
+		dev_err(rdev->dev, "failled initializing WB (%d).\n", r);
 	}
+	r = r100_ib_init(rdev);
+	if (r) {
+		dev_err(rdev->dev, "failled initializing IB (%d).\n", r);
+		return r;
+	}
+	return 0;
 }
 
+int r420_suspend(struct radeon_device *rdev)
+{
+	r100_cp_disable(rdev);
+	r100_wb_disable(rdev);
+	r100_irq_disable(rdev);
+	radeon_gart_disable(rdev);
+	return 0;
+}
 
-/*
- * r420,r423,rv410 VRAM info
- */
-void r420_vram_info(struct radeon_device *rdev)
+void r420_fini(struct radeon_device *rdev)
 {
-	r300_vram_info(rdev);
+	r100_cp_fini(rdev);
+	r100_wb_fini(rdev);
+	r100_ib_fini(rdev);
+	radeon_gem_fini(rdev);
+	if (rdev->flags & RADEON_IS_PCIE) {
+		rv370_pcie_gart_disable(rdev);
+		radeon_gart_table_vram_free(rdev);
+	} else {
+		r100_pci_gart_disable(rdev);
+		radeon_gart_table_ram_free(rdev);
+	}
+	radeon_gart_fini(rdev);
+	radeon_agp_fini(rdev);
+	radeon_irq_kms_fini(rdev);
+	radeon_fence_driver_fini(rdev);
+	radeon_object_fini(rdev);
+	if (rdev->is_atom_bios) {
+		radeon_atombios_fini(rdev);
+	} else {
+		radeon_combios_fini(rdev);
+	}
+	kfree(rdev->bios);
+	rdev->bios = NULL;
 }
 
+int r420_init(struct radeon_device *rdev)
+{
+	int r;
+
+	rdev->new_init_path = true;
+	/* Initialize scratch registers */
+	radeon_scratch_init(rdev);
+	/* Initialize surface registers */
+	radeon_surface_init(rdev);
+	/* TODO: disable VGA need to use VGA request */
+	/* BIOS*/
+	if (!radeon_get_bios(rdev)) {
+		if (ASIC_IS_AVIVO(rdev))
+			return -EINVAL;
+	}
+	if (rdev->is_atom_bios) {
+		r = radeon_atombios_init(rdev);
+		if (r) {
+			return r;
+		}
+	} else {
+		r = radeon_combios_init(rdev);
+		if (r) {
+			return r;
+		}
+	}
+	/* Reset gpu before posting otherwise ATOM will enter infinite loop */
+	if (radeon_gpu_reset(rdev)) {
+		dev_warn(rdev->dev,
+			"GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
+			RREG32(R_000E40_RBBM_STATUS),
+			RREG32(R_0007C0_CP_STAT));
+	}
+	/* check if cards are posted or not */
+	if (!radeon_card_posted(rdev) && rdev->bios) {
+		DRM_INFO("GPU not posted. posting now...\n");
+		if (rdev->is_atom_bios) {
+			atom_asic_init(rdev->mode_info.atom_context);
+		} else {
+			radeon_combios_asic_init(rdev->ddev);
+		}
+	}
+	/* Initialize clocks */
+	radeon_get_clock_info(rdev->ddev);
+	/* Get vram informations */
+	r300_vram_info(rdev);
+	/* Initialize memory controller (also test AGP) */
+	r = r420_mc_init(rdev);
+	if (r) {
+		return r;
+	}
+	r420_debugfs(rdev);
+	/* Fence driver */
+	r = radeon_fence_driver_init(rdev);
+	if (r) {
+		return r;
+	}
+	r = radeon_irq_kms_init(rdev);
+	if (r) {
+		return r;
+	}
+	/* Memory manager */
+	r = radeon_object_init(rdev);
+	if (r) {
+		return r;
+	}
+	r300_set_reg_safe(rdev);
+	r = r420_resume(rdev);
+	if (r) {
+		/* Somethings want wront with the accel init stop accel */
+		dev_err(rdev->dev, "Disabling GPU acceleration\n");
+		r420_suspend(rdev);
+		r100_cp_fini(rdev);
+		r100_wb_fini(rdev);
+		r100_ib_fini(rdev);
+		if (rdev->flags & RADEON_IS_PCIE) {
+			rv370_pcie_gart_disable(rdev);
+			radeon_gart_table_vram_free(rdev);
+		} else {
+			r100_pci_gart_disable(rdev);
+			radeon_gart_table_ram_free(rdev);
+		}
+		radeon_gart_fini(rdev);
+		radeon_agp_fini(rdev);
+		radeon_irq_kms_fini(rdev);
+	}
+	return 0;
+}
 
 /*
  * Debugfs info
@@ -238,19 +365,3 @@ int r420_debugfs_pipes_info_init(struct radeon_device *rdev)
 	return 0;
 #endif
 }
-
-u32 r420_mc_rreg(struct radeon_device *rdev, u32 reg)
-{
-	u32 r;
-
-	WREG32(R_0001F8_MC_IND_INDEX, S_0001F8_MC_IND_ADDR(reg));
-	r = RREG32(R_0001FC_MC_IND_DATA);
-	return r;
-}
-
-void r420_mc_wreg(struct radeon_device *rdev, u32 reg, u32 v)
-{
-	WREG32(R_0001F8_MC_IND_INDEX, S_0001F8_MC_IND_ADDR(reg) |
-		S_0001F8_MC_IND_WR_EN(1));
-	WREG32(R_0001FC_MC_IND_DATA, v);
-}