author    Andrea Bastoni <bastoni@cs.unc.edu>  2010-05-30 19:16:45 -0400
committer Andrea Bastoni <bastoni@cs.unc.edu>  2010-05-30 19:16:45 -0400
commit    ada47b5fe13d89735805b566185f4885f5a3f750 (patch)
tree      644b88f8a71896307d71438e9b3af49126ffb22b /drivers/gpu/drm/radeon/radeon_device.c
parent    43e98717ad40a4ae64545b5ba047c7b86aa44f4f (diff)
parent    3280f21d43ee541f97f8cda5792150d2dbec20d5 (diff)

Merge branch 'wip-2.6.34' into old-private-master
Diffstat (limited to 'drivers/gpu/drm/radeon/radeon_device.c')
-rw-r--r--   drivers/gpu/drm/radeon/radeon_device.c | 621
1 file changed, 355 insertions(+), 266 deletions(-)
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index 41bb76fbe734..7b629e305560 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -26,15 +26,64 @@
  * Jerome Glisse
  */
 #include <linux/console.h>
+#include <linux/slab.h>
 #include <drm/drmP.h>
 #include <drm/drm_crtc_helper.h>
 #include <drm/radeon_drm.h>
 #include <linux/vgaarb.h>
+#include <linux/vga_switcheroo.h>
 #include "radeon_reg.h"
 #include "radeon.h"
-#include "radeon_asic.h"
 #include "atom.h"
 
+static const char radeon_family_name[][16] = {
+        "R100",
+        "RV100",
+        "RS100",
+        "RV200",
+        "RS200",
+        "R200",
+        "RV250",
+        "RS300",
+        "RV280",
+        "R300",
+        "R350",
+        "RV350",
+        "RV380",
+        "R420",
+        "R423",
+        "RV410",
+        "RS400",
+        "RS480",
+        "RS600",
+        "RS690",
+        "RS740",
+        "RV515",
+        "R520",
+        "RV530",
+        "RV560",
+        "RV570",
+        "R580",
+        "R600",
+        "RV610",
+        "RV630",
+        "RV670",
+        "RV620",
+        "RV635",
+        "RS780",
+        "RS880",
+        "RV770",
+        "RV730",
+        "RV710",
+        "RV740",
+        "CEDAR",
+        "REDWOOD",
+        "JUNIPER",
+        "CYPRESS",
+        "HEMLOCK",
+        "LAST",
+};
+
 /*
  * Clear GPU surface registers.
  */
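The table above is meant to be indexed directly by the radeon_family enum; the DRM_INFO() added later in this patch does exactly radeon_family_name[rdev->family]. A tiny self-contained sketch of the same lookup pattern, with a trimmed, illustrative table (not the driver's full list):

#include <stdio.h>

/* index a fixed-width name table by an enum value */
enum family { R100, RV100, RS100, FAMILY_LAST };
static const char family_name[][16] = { "R100", "RV100", "RS100", "LAST" };

int main(void)
{
        enum family f = RV100;
        printf("family: %s\n", family_name[f]); /* prints "family: RV100" */
        return 0;
}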
@@ -44,10 +93,11 @@ void radeon_surface_init(struct radeon_device *rdev)
 	if (rdev->family < CHIP_R600) {
 		int i;
 
-		for (i = 0; i < 8; i++) {
-			WREG32(RADEON_SURFACE0_INFO +
-			       i * (RADEON_SURFACE1_INFO - RADEON_SURFACE0_INFO),
-			       0);
+		for (i = 0; i < RADEON_GEM_MAX_SURFACES; i++) {
+			if (rdev->surface_regs[i].bo)
+				radeon_bo_get_surface_reg(rdev->surface_regs[i].bo);
+			else
+				radeon_clear_surface_reg(rdev, i);
 		}
 		/* enable surfaces */
 		WREG32(RADEON_SURFACE_CNTL, 0);
@@ -99,80 +149,103 @@ void radeon_scratch_free(struct radeon_device *rdev, uint32_t reg)
 	}
 }
 
-/*
- * MC common functions
+/**
+ * radeon_vram_location - try to find VRAM location
+ * @rdev: radeon device structure holding all necessary information
+ * @mc: memory controller structure holding memory information
+ * @base: base address at which to put VRAM
+ *
+ * Function will try to place VRAM at the base address provided
+ * as a parameter (which is so far either the PCI aperture address or,
+ * for IGP, the TOM base address).
+ *
+ * If there is not enough space to fit the invisible VRAM in the 32-bit
+ * address space then we limit the VRAM size to the aperture.
+ *
+ * If we are using AGP and the AGP aperture doesn't allow us to have
+ * room for all the VRAM then we restrict the VRAM to the PCI aperture
+ * size and print a warning.
+ *
+ * This function will never fail; the worst case is limiting VRAM.
+ *
+ * Note: GTT start, end, size should be initialized before calling this
+ * function on AGP platforms.
+ *
+ * Note: we don't explicitly enforce VRAM start to be aligned on VRAM size;
+ * this shouldn't be a problem as we are using the PCI aperture as a reference.
+ * Otherwise this would be needed for rv280, all r3xx, and all r4xx, but
+ * not IGP.
+ *
+ * Note: we use mc_vram_size as on some boards we need to program the MC to
+ * cover the whole aperture even if VRAM size is smaller than aperture size
+ * (Novell bug 204882 along with lots of Ubuntu ones).
+ *
+ * Note: when limiting VRAM it's safe to overwrite real_vram_size because
+ * we are not in the case where real_vram_size is smaller than mc_vram_size
+ * (i.e. not affected by the bogus hardware of Novell bug 204882 and the
+ * Ubuntu ones).
+ *
+ * Note: IGP TOM addr should be the same as the aperture addr; we don't
+ * explicitly check for that though.
+ *
+ * FIXME: when reducing VRAM size align new size on power of 2.
  */
-int radeon_mc_setup(struct radeon_device *rdev)
+void radeon_vram_location(struct radeon_device *rdev, struct radeon_mc *mc, u64 base)
+{
+	mc->vram_start = base;
+	if (mc->mc_vram_size > (0xFFFFFFFF - base + 1)) {
+		dev_warn(rdev->dev, "limiting VRAM to PCI aperture size\n");
+		mc->real_vram_size = mc->aper_size;
+		mc->mc_vram_size = mc->aper_size;
+	}
+	mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
+	if (rdev->flags & RADEON_IS_AGP && mc->vram_end > mc->gtt_start && mc->vram_end <= mc->gtt_end) {
+		dev_warn(rdev->dev, "limiting VRAM to PCI aperture size\n");
+		mc->real_vram_size = mc->aper_size;
+		mc->mc_vram_size = mc->aper_size;
+	}
+	mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
+	dev_info(rdev->dev, "VRAM: %lluM 0x%08llX - 0x%08llX (%lluM used)\n",
+			mc->mc_vram_size >> 20, mc->vram_start,
+			mc->vram_end, mc->real_vram_size >> 20);
+}
+
+/**
+ * radeon_gtt_location - try to find GTT location
+ * @rdev: radeon device structure holding all necessary information
+ * @mc: memory controller structure holding memory information
+ *
+ * Function will try to place GTT before or after VRAM.
+ *
+ * If GTT size is bigger than the space left, then we adjust the GTT size.
+ * Thus this function will never fail.
+ *
+ * FIXME: when reducing GTT size align new size on power of 2.
+ */
+void radeon_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc)
 {
-	uint32_t tmp;
+	u64 size_af, size_bf;
 
-	/* Some chips have an "issue" with the memory controller, the
-	 * location must be aligned to the size. We just align it down,
-	 * too bad if we walk over the top of system memory, we don't
-	 * use DMA without a remapped anyway.
-	 * Affected chips are rv280, all r3xx, and all r4xx, but not IGP
-	 */
-	/* FGLRX seems to setup like this, VRAM a 0, then GART.
-	 */
-	/*
-	 * Note: from R6xx the address space is 40bits but here we only
-	 * use 32bits (still have to see a card which would exhaust 4G
-	 * address space).
-	 */
-	if (rdev->mc.vram_location != 0xFFFFFFFFUL) {
-		/* vram location was already setup try to put gtt after
-		 * if it fits */
-		tmp = rdev->mc.vram_location + rdev->mc.mc_vram_size;
-		tmp = (tmp + rdev->mc.gtt_size - 1) & ~(rdev->mc.gtt_size - 1);
-		if ((0xFFFFFFFFUL - tmp) >= rdev->mc.gtt_size) {
-			rdev->mc.gtt_location = tmp;
-		} else {
-			if (rdev->mc.gtt_size >= rdev->mc.vram_location) {
-				printk(KERN_ERR "[drm] GTT too big to fit "
-				       "before or after vram location.\n");
-				return -EINVAL;
-			}
-			rdev->mc.gtt_location = 0;
-		}
-	} else if (rdev->mc.gtt_location != 0xFFFFFFFFUL) {
-		/* gtt location was already setup try to put vram before
-		 * if it fits */
-		if (rdev->mc.mc_vram_size < rdev->mc.gtt_location) {
-			rdev->mc.vram_location = 0;
-		} else {
-			tmp = rdev->mc.gtt_location + rdev->mc.gtt_size;
-			tmp += (rdev->mc.mc_vram_size - 1);
-			tmp &= ~(rdev->mc.mc_vram_size - 1);
-			if ((0xFFFFFFFFUL - tmp) >= rdev->mc.mc_vram_size) {
-				rdev->mc.vram_location = tmp;
-			} else {
-				printk(KERN_ERR "[drm] vram too big to fit "
-				       "before or after GTT location.\n");
-				return -EINVAL;
-			}
+	size_af = 0xFFFFFFFF - mc->vram_end;
+	size_bf = mc->vram_start;
+	if (size_bf > size_af) {
+		if (mc->gtt_size > size_bf) {
+			dev_warn(rdev->dev, "limiting GTT\n");
+			mc->gtt_size = size_bf;
 		}
+		mc->gtt_start = mc->vram_start - mc->gtt_size;
 	} else {
-		rdev->mc.vram_location = 0;
-		tmp = rdev->mc.mc_vram_size;
-		tmp = (tmp + rdev->mc.gtt_size - 1) & ~(rdev->mc.gtt_size - 1);
-		rdev->mc.gtt_location = tmp;
-	}
-	rdev->mc.vram_start = rdev->mc.vram_location;
-	rdev->mc.vram_end = rdev->mc.vram_location + rdev->mc.mc_vram_size - 1;
-	rdev->mc.gtt_start = rdev->mc.gtt_location;
-	rdev->mc.gtt_end = rdev->mc.gtt_location + rdev->mc.gtt_size - 1;
-	DRM_INFO("radeon: VRAM %uM\n", (unsigned)(rdev->mc.mc_vram_size >> 20));
-	DRM_INFO("radeon: VRAM from 0x%08X to 0x%08X\n",
-		 (unsigned)rdev->mc.vram_location,
-		 (unsigned)(rdev->mc.vram_location + rdev->mc.mc_vram_size - 1));
-	DRM_INFO("radeon: GTT %uM\n", (unsigned)(rdev->mc.gtt_size >> 20));
-	DRM_INFO("radeon: GTT from 0x%08X to 0x%08X\n",
-		 (unsigned)rdev->mc.gtt_location,
-		 (unsigned)(rdev->mc.gtt_location + rdev->mc.gtt_size - 1));
-	return 0;
+		if (mc->gtt_size > size_af) {
+			dev_warn(rdev->dev, "limiting GTT\n");
+			mc->gtt_size = size_af;
+		}
+		mc->gtt_start = mc->vram_end + 1;
+	}
+	mc->gtt_end = mc->gtt_start + mc->gtt_size - 1;
+	dev_info(rdev->dev, "GTT: %lluM 0x%08llX - 0x%08llX\n",
+			mc->gtt_size >> 20, mc->gtt_start, mc->gtt_end);
 }
 
-
 /*
  * GPU helpers function.
  */
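For reference, the placement rule documented in the new kernel-doc above can be exercised on its own: the GTT goes into whichever side of VRAM has more room left in the 32-bit MC address space, and is shrunk if it still does not fit. The following is a hedged, self-contained sketch in plain C; the struct and field names are illustrative stand-ins, not the driver's radeon_mc type:

#include <stdint.h>
#include <stdio.h>

struct mc_layout {
        uint64_t vram_start, vram_end;
        uint64_t gtt_start, gtt_end, gtt_size;
};

static void place_gtt(struct mc_layout *mc)
{
        uint64_t size_af = 0xFFFFFFFFULL - mc->vram_end;  /* room after VRAM  */
        uint64_t size_bf = mc->vram_start;                /* room before VRAM */

        if (size_bf > size_af) {
                if (mc->gtt_size > size_bf)
                        mc->gtt_size = size_bf;           /* shrink to fit */
                mc->gtt_start = mc->vram_start - mc->gtt_size;
        } else {
                if (mc->gtt_size > size_af)
                        mc->gtt_size = size_af;
                mc->gtt_start = mc->vram_end + 1;
        }
        mc->gtt_end = mc->gtt_start + mc->gtt_size - 1;
}

int main(void)
{
        /* 256M of VRAM at the top of the 32-bit space leaves more room before
         * it, so the GTT ends up just below vram_start. */
        struct mc_layout mc = {
                .vram_start = 0xF0000000, .vram_end = 0xFFFFFFFF,
                .gtt_size   = 512ULL << 20,
        };
        place_gtt(&mc);
        printf("GTT 0x%08llx - 0x%08llx\n",
               (unsigned long long)mc.gtt_start, (unsigned long long)mc.gtt_end);
        return 0;
}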
@@ -181,7 +254,16 @@ bool radeon_card_posted(struct radeon_device *rdev)
 	uint32_t reg;
 
 	/* first check CRTCs */
-	if (ASIC_IS_AVIVO(rdev)) {
+	if (ASIC_IS_DCE4(rdev)) {
+		reg = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET) |
+			RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET) |
+			RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET) |
+			RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET) |
+			RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET) |
+			RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET);
+		if (reg & EVERGREEN_CRTC_MASTER_EN)
+			return true;
+	} else if (ASIC_IS_AVIVO(rdev)) {
 		reg = RREG32(AVIVO_D1CRTC_CONTROL) |
 			RREG32(AVIVO_D2CRTC_CONTROL);
 		if (reg & AVIVO_CRTC_EN) {
@@ -208,8 +290,58 @@ bool radeon_card_posted(struct radeon_device *rdev)
 
 }
 
+void radeon_update_bandwidth_info(struct radeon_device *rdev)
+{
+	fixed20_12 a;
+	u32 sclk, mclk;
+
+	if (rdev->flags & RADEON_IS_IGP) {
+		sclk = radeon_get_engine_clock(rdev);
+		mclk = rdev->clock.default_mclk;
+
+		a.full = rfixed_const(100);
+		rdev->pm.sclk.full = rfixed_const(sclk);
+		rdev->pm.sclk.full = rfixed_div(rdev->pm.sclk, a);
+		rdev->pm.mclk.full = rfixed_const(mclk);
+		rdev->pm.mclk.full = rfixed_div(rdev->pm.mclk, a);
+
+		a.full = rfixed_const(16);
+		/* core_bandwidth = sclk(Mhz) * 16 */
+		rdev->pm.core_bandwidth.full = rfixed_div(rdev->pm.sclk, a);
+	} else {
+		sclk = radeon_get_engine_clock(rdev);
+		mclk = radeon_get_memory_clock(rdev);
+
+		a.full = rfixed_const(100);
+		rdev->pm.sclk.full = rfixed_const(sclk);
+		rdev->pm.sclk.full = rfixed_div(rdev->pm.sclk, a);
+		rdev->pm.mclk.full = rfixed_const(mclk);
+		rdev->pm.mclk.full = rfixed_div(rdev->pm.mclk, a);
+	}
+}
+
+bool radeon_boot_test_post_card(struct radeon_device *rdev)
+{
+	if (radeon_card_posted(rdev))
+		return true;
+
+	if (rdev->bios) {
+		DRM_INFO("GPU not posted. posting now...\n");
+		if (rdev->is_atom_bios)
+			atom_asic_init(rdev->mode_info.atom_context);
+		else
+			radeon_combios_asic_init(rdev->ddev);
+		return true;
+	} else {
+		dev_err(rdev->dev, "Card not posted and no BIOS - ignoring\n");
+		return false;
+	}
+}
+
 int radeon_dummy_page_init(struct radeon_device *rdev)
 {
+	if (rdev->dummy_page.page)
+		return 0;
 	rdev->dummy_page.page = alloc_page(GFP_DMA32 | GFP_KERNEL | __GFP_ZERO);
 	if (rdev->dummy_page.page == NULL)
 		return -ENOMEM;
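radeon_update_bandwidth_info() stores the clocks in the driver's 20.12 fixed-point type after dividing by 100, so a clock presumably reported in 10 kHz units (the usual convention for radeon clock values) ends up in MHz. A hedged sketch of how such a 20.12 type behaves; the helpers below only mirror the shape of rfixed_const()/rfixed_div() and are not the driver's implementation:

#include <stdint.h>
#include <stdio.h>

/* 20.12 fixed point: 12 fractional bits, so const() shifts left by 12 and
 * div() pre-shifts the dividend to preserve precision. */
typedef struct { uint32_t full; } fx20_12;

static fx20_12 fx_const(uint32_t v) { return (fx20_12){ v << 12 }; }

static fx20_12 fx_div(fx20_12 a, fx20_12 b)
{
        uint64_t t = ((uint64_t)a.full << 12) + (b.full / 2); /* round to nearest */
        return (fx20_12){ (uint32_t)(t / b.full) };
}

static double fx_to_double(fx20_12 a) { return a.full / 4096.0; }

int main(void)
{
        /* e.g. an engine clock of 60000 (10 kHz units) becomes 600 MHz */
        fx20_12 sclk = fx_div(fx_const(60000), fx_const(100));
        printf("sclk = %.2f MHz\n", fx_to_double(sclk)); /* 600.00 */
        return 0;
}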
@@ -234,167 +366,6 @@ void radeon_dummy_page_fini(struct radeon_device *rdev)
 }
 
 
-/*
- * Registers accessors functions.
- */
-uint32_t radeon_invalid_rreg(struct radeon_device *rdev, uint32_t reg)
-{
-	DRM_ERROR("Invalid callback to read register 0x%04X\n", reg);
-	BUG_ON(1);
-	return 0;
-}
-
-void radeon_invalid_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
-{
-	DRM_ERROR("Invalid callback to write register 0x%04X with 0x%08X\n",
-		  reg, v);
-	BUG_ON(1);
-}
-
-void radeon_register_accessor_init(struct radeon_device *rdev)
-{
-	rdev->mc_rreg = &radeon_invalid_rreg;
-	rdev->mc_wreg = &radeon_invalid_wreg;
-	rdev->pll_rreg = &radeon_invalid_rreg;
-	rdev->pll_wreg = &radeon_invalid_wreg;
-	rdev->pciep_rreg = &radeon_invalid_rreg;
-	rdev->pciep_wreg = &radeon_invalid_wreg;
-
-	/* Don't change order as we are overridding accessor. */
-	if (rdev->family < CHIP_RV515) {
-		rdev->pcie_reg_mask = 0xff;
-	} else {
-		rdev->pcie_reg_mask = 0x7ff;
-	}
-	/* FIXME: not sure here */
-	if (rdev->family <= CHIP_R580) {
-		rdev->pll_rreg = &r100_pll_rreg;
-		rdev->pll_wreg = &r100_pll_wreg;
-	}
-	if (rdev->family >= CHIP_R420) {
-		rdev->mc_rreg = &r420_mc_rreg;
-		rdev->mc_wreg = &r420_mc_wreg;
-	}
-	if (rdev->family >= CHIP_RV515) {
-		rdev->mc_rreg = &rv515_mc_rreg;
-		rdev->mc_wreg = &rv515_mc_wreg;
-	}
-	if (rdev->family == CHIP_RS400 || rdev->family == CHIP_RS480) {
-		rdev->mc_rreg = &rs400_mc_rreg;
-		rdev->mc_wreg = &rs400_mc_wreg;
-	}
-	if (rdev->family == CHIP_RS690 || rdev->family == CHIP_RS740) {
-		rdev->mc_rreg = &rs690_mc_rreg;
-		rdev->mc_wreg = &rs690_mc_wreg;
-	}
-	if (rdev->family == CHIP_RS600) {
-		rdev->mc_rreg = &rs600_mc_rreg;
-		rdev->mc_wreg = &rs600_mc_wreg;
-	}
-	if (rdev->family >= CHIP_R600) {
-		rdev->pciep_rreg = &r600_pciep_rreg;
-		rdev->pciep_wreg = &r600_pciep_wreg;
-	}
-}
-
-
-/*
- * ASIC
- */
-int radeon_asic_init(struct radeon_device *rdev)
-{
-	radeon_register_accessor_init(rdev);
-	switch (rdev->family) {
-	case CHIP_R100:
-	case CHIP_RV100:
-	case CHIP_RS100:
-	case CHIP_RV200:
-	case CHIP_RS200:
-	case CHIP_R200:
-	case CHIP_RV250:
-	case CHIP_RS300:
-	case CHIP_RV280:
-		rdev->asic = &r100_asic;
-		break;
-	case CHIP_R300:
-	case CHIP_R350:
-	case CHIP_RV350:
-	case CHIP_RV380:
-		rdev->asic = &r300_asic;
-		if (rdev->flags & RADEON_IS_PCIE) {
-			rdev->asic->gart_tlb_flush = &rv370_pcie_gart_tlb_flush;
-			rdev->asic->gart_set_page = &rv370_pcie_gart_set_page;
-		}
-		break;
-	case CHIP_R420:
-	case CHIP_R423:
-	case CHIP_RV410:
-		rdev->asic = &r420_asic;
-		break;
-	case CHIP_RS400:
-	case CHIP_RS480:
-		rdev->asic = &rs400_asic;
-		break;
-	case CHIP_RS600:
-		rdev->asic = &rs600_asic;
-		break;
-	case CHIP_RS690:
-	case CHIP_RS740:
-		rdev->asic = &rs690_asic;
-		break;
-	case CHIP_RV515:
-		rdev->asic = &rv515_asic;
-		break;
-	case CHIP_R520:
-	case CHIP_RV530:
-	case CHIP_RV560:
-	case CHIP_RV570:
-	case CHIP_R580:
-		rdev->asic = &r520_asic;
-		break;
-	case CHIP_R600:
-	case CHIP_RV610:
-	case CHIP_RV630:
-	case CHIP_RV620:
-	case CHIP_RV635:
-	case CHIP_RV670:
-	case CHIP_RS780:
-	case CHIP_RS880:
-		rdev->asic = &r600_asic;
-		break;
-	case CHIP_RV770:
-	case CHIP_RV730:
-	case CHIP_RV710:
-	case CHIP_RV740:
-		rdev->asic = &rv770_asic;
-		break;
-	default:
-		/* FIXME: not supported yet */
-		return -EINVAL;
-	}
-	return 0;
-}
-
-
-/*
- * Wrapper around modesetting bits.
- */
-int radeon_clocks_init(struct radeon_device *rdev)
-{
-	int r;
-
-	r = radeon_static_clocks_init(rdev->ddev);
-	if (r) {
-		return r;
-	}
-	DRM_INFO("Clocks initialized !\n");
-	return 0;
-}
-
-void radeon_clocks_fini(struct radeon_device *rdev)
-{
-}
-
 /* ATOM accessor methods */
 static uint32_t cail_pll_read(struct card_info *info, uint32_t reg)
 {
@@ -462,13 +433,18 @@ int radeon_atombios_init(struct radeon_device *rdev)
 	atom_card_info->pll_write = cail_pll_write;
 
 	rdev->mode_info.atom_context = atom_parse(atom_card_info, rdev->bios);
+	mutex_init(&rdev->mode_info.atom_context->mutex);
 	radeon_atom_initialize_bios_scratch_regs(rdev->ddev);
+	atom_allocate_fb_scratch(rdev->mode_info.atom_context);
 	return 0;
 }
 
 void radeon_atombios_fini(struct radeon_device *rdev)
 {
-	kfree(rdev->mode_info.atom_context);
+	if (rdev->mode_info.atom_context) {
+		kfree(rdev->mode_info.atom_context->scratch);
+		kfree(rdev->mode_info.atom_context);
+	}
 	kfree(rdev->mode_info.atom_card_info);
 }
 
@@ -494,31 +470,102 @@ static unsigned int radeon_vga_set_decode(void *cookie, bool state)
 	return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
 }
 
-void radeon_agp_disable(struct radeon_device *rdev)
+void radeon_check_arguments(struct radeon_device *rdev)
 {
-	rdev->flags &= ~RADEON_IS_AGP;
-	if (rdev->family >= CHIP_R600) {
-		DRM_INFO("Forcing AGP to PCIE mode\n");
-		rdev->flags |= RADEON_IS_PCIE;
-	} else if (rdev->family >= CHIP_RV515 ||
-		   rdev->family == CHIP_RV380 ||
-		   rdev->family == CHIP_RV410 ||
-		   rdev->family == CHIP_R423) {
-		DRM_INFO("Forcing AGP to PCIE mode\n");
-		rdev->flags |= RADEON_IS_PCIE;
-		rdev->asic->gart_tlb_flush = &rv370_pcie_gart_tlb_flush;
-		rdev->asic->gart_set_page = &rv370_pcie_gart_set_page;
+	/* vramlimit must be a power of two */
+	switch (radeon_vram_limit) {
+	case 0:
+	case 4:
+	case 8:
+	case 16:
+	case 32:
+	case 64:
+	case 128:
+	case 256:
+	case 512:
+	case 1024:
+	case 2048:
+	case 4096:
+		break;
+	default:
+		dev_warn(rdev->dev, "vram limit (%d) must be a power of 2\n",
+				radeon_vram_limit);
+		radeon_vram_limit = 0;
+		break;
+	}
+	radeon_vram_limit = radeon_vram_limit << 20;
+	/* gtt size must be power of two and greater or equal to 32M */
+	switch (radeon_gart_size) {
+	case 4:
+	case 8:
+	case 16:
+		dev_warn(rdev->dev, "gart size (%d) too small forcing to 512M\n",
+				radeon_gart_size);
+		radeon_gart_size = 512;
+		break;
+	case 32:
+	case 64:
+	case 128:
+	case 256:
+	case 512:
+	case 1024:
+	case 2048:
+	case 4096:
+		break;
+	default:
+		dev_warn(rdev->dev, "gart size (%d) must be a power of 2\n",
+				radeon_gart_size);
+		radeon_gart_size = 512;
+		break;
+	}
+	rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024;
+	/* AGP mode can only be -1, 1, 2, 4, 8 */
+	switch (radeon_agpmode) {
+	case -1:
+	case 0:
+	case 1:
+	case 2:
+	case 4:
+	case 8:
+		break;
+	default:
+		dev_warn(rdev->dev, "invalid AGP mode %d (valid mode: "
+				"-1, 0, 1, 2, 4, 8)\n", radeon_agpmode);
+		radeon_agpmode = 0;
+		break;
+	}
+}
+
+static void radeon_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_state state)
+{
+	struct drm_device *dev = pci_get_drvdata(pdev);
+	struct radeon_device *rdev = dev->dev_private;
+	pm_message_t pmm = { .event = PM_EVENT_SUSPEND };
+	if (state == VGA_SWITCHEROO_ON) {
+		printk(KERN_INFO "radeon: switched on\n");
+		/* don't suspend or resume card normally */
+		rdev->powered_down = false;
+		radeon_resume_kms(dev);
 	} else {
-		DRM_INFO("Forcing AGP to PCI mode\n");
-		rdev->flags |= RADEON_IS_PCI;
-		rdev->asic->gart_tlb_flush = &r100_pci_gart_tlb_flush;
-		rdev->asic->gart_set_page = &r100_pci_gart_set_page;
+		printk(KERN_INFO "radeon: switched off\n");
+		radeon_suspend_kms(dev, pmm);
+		/* don't suspend or resume card normally */
+		rdev->powered_down = true;
 	}
 }
 
-/*
- * Radeon device.
- */
+static bool radeon_switcheroo_can_switch(struct pci_dev *pdev)
+{
+	struct drm_device *dev = pci_get_drvdata(pdev);
+	bool can_switch;
+
+	spin_lock(&dev->count_lock);
+	can_switch = (dev->open_count == 0);
+	spin_unlock(&dev->count_lock);
+	return can_switch;
+}
+
+
 int radeon_device_init(struct radeon_device *rdev,
 		       struct drm_device *ddev,
 		       struct pci_dev *pdev,
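radeon_check_arguments() accepts only power-of-two values for the VRAM limit and the GART size, with the GART additionally required to be at least 32M (falling back to 512M otherwise). A hedged sketch of an equivalent check in plain C; the kernel's is_power_of_2() from <linux/log2.h> behaves like the is_pow2() helper below:

#include <stdbool.h>
#include <stdio.h>

static bool is_pow2(unsigned int n)
{
        return n != 0 && (n & (n - 1)) == 0;
}

/* must be a power of two in [32, 4096] MB, otherwise fall back to 512M */
static unsigned int check_gart_size(unsigned int megabytes)
{
        if (!is_pow2(megabytes) || megabytes < 32 || megabytes > 4096)
                return 512;
        return megabytes;
}

int main(void)
{
        printf("%u %u %u\n",
               check_gart_size(16), check_gart_size(256), check_gart_size(100));
        /* prints: 512 256 512 */
        return 0;
}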
@@ -527,7 +574,6 @@ int radeon_device_init(struct radeon_device *rdev,
 	int r;
 	int dma_bits;
 
-	DRM_INFO("radeon: Initializing kernel modesetting.\n");
 	rdev->shutdown = false;
 	rdev->dev = &pdev->dev;
 	rdev->ddev = ddev;
@@ -539,21 +585,44 @@ int radeon_device_init(struct radeon_device *rdev,
 	rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024;
 	rdev->gpu_lockup = false;
 	rdev->accel_working = false;
+
+	DRM_INFO("initializing kernel modesetting (%s 0x%04X:0x%04X).\n",
+		radeon_family_name[rdev->family], pdev->vendor, pdev->device);
+
 	/* mutex initialization are all done here so we
 	 * can recall function without having locking issues */
 	mutex_init(&rdev->cs_mutex);
 	mutex_init(&rdev->ib_pool.mutex);
 	mutex_init(&rdev->cp.mutex);
+	mutex_init(&rdev->dc_hw_i2c_mutex);
+	if (rdev->family >= CHIP_R600)
+		spin_lock_init(&rdev->ih.lock);
+	mutex_init(&rdev->gem.mutex);
+	mutex_init(&rdev->pm.mutex);
 	rwlock_init(&rdev->fence_drv.lock);
 	INIT_LIST_HEAD(&rdev->gem.objects);
+	init_waitqueue_head(&rdev->irq.vblank_queue);
+
+	/* setup workqueue */
+	rdev->wq = create_workqueue("radeon");
+	if (rdev->wq == NULL)
+		return -ENOMEM;
 
 	/* Set asic functions */
 	r = radeon_asic_init(rdev);
-	if (r) {
-		return r;
+	if (r)
+		return r;
+	radeon_check_arguments(rdev);
+
+	/* all of the newer IGP chips have an internal gart
+	 * However some rs4xx report as AGP, so remove that here.
+	 */
+	if ((rdev->family >= CHIP_RS400) &&
+	    (rdev->flags & RADEON_IS_IGP)) {
+		rdev->flags &= ~RADEON_IS_AGP;
 	}
 
-	if (radeon_agpmode == -1) {
+	if (rdev->flags & RADEON_IS_AGP && radeon_agpmode == -1) {
 		radeon_agp_disable(rdev);
 	}
 
@@ -590,6 +659,9 @@ int radeon_device_init(struct radeon_device *rdev,
 	/* this will fail for cards that aren't VGA class devices, just
 	 * ignore it */
 	vga_client_register(rdev->pdev, rdev, NULL, radeon_vga_set_decode);
+	vga_switcheroo_register_client(rdev->pdev,
+				       radeon_switcheroo_set_state,
+				       radeon_switcheroo_can_switch);
 
 	r = radeon_init(rdev);
 	if (r)
@@ -620,6 +692,8 @@ void radeon_device_fini(struct radeon_device *rdev)
 	DRM_INFO("radeon: finishing device.\n");
 	rdev->shutdown = true;
 	radeon_fini(rdev);
+	destroy_workqueue(rdev->wq);
+	vga_switcheroo_unregister_client(rdev->pdev);
 	vga_client_register(rdev->pdev, NULL, NULL, NULL);
 	iounmap(rdev->rmmio);
 	rdev->rmmio = NULL;
@@ -631,38 +705,48 @@ void radeon_device_fini(struct radeon_device *rdev)
  */
 int radeon_suspend_kms(struct drm_device *dev, pm_message_t state)
 {
-	struct radeon_device *rdev = dev->dev_private;
+	struct radeon_device *rdev;
 	struct drm_crtc *crtc;
+	int r;
 
-	if (dev == NULL || rdev == NULL) {
+	if (dev == NULL || dev->dev_private == NULL) {
 		return -ENODEV;
 	}
 	if (state.event == PM_EVENT_PRETHAW) {
 		return 0;
 	}
+	rdev = dev->dev_private;
+
+	if (rdev->powered_down)
+		return 0;
 	/* unpin the front buffers */
 	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
 		struct radeon_framebuffer *rfb = to_radeon_framebuffer(crtc->fb);
-		struct radeon_object *robj;
+		struct radeon_bo *robj;
 
 		if (rfb == NULL || rfb->obj == NULL) {
 			continue;
 		}
 		robj = rfb->obj->driver_private;
-		if (robj != rdev->fbdev_robj) {
-			radeon_object_unpin(robj);
+		if (robj != rdev->fbdev_rbo) {
+			r = radeon_bo_reserve(robj, false);
+			if (unlikely(r == 0)) {
+				radeon_bo_unpin(robj);
+				radeon_bo_unreserve(robj);
+			}
 		}
 	}
 	/* evict vram memory */
-	radeon_object_evict_vram(rdev);
+	radeon_bo_evict_vram(rdev);
 	/* wait for gpu to finish processing current batch */
 	radeon_fence_wait_last(rdev);
 
 	radeon_save_bios_scratch_regs(rdev);
 
 	radeon_suspend(rdev);
+	radeon_hpd_fini(rdev);
 	/* evict remaining vram memory */
-	radeon_object_evict_vram(rdev);
+	radeon_bo_evict_vram(rdev);
 
 	pci_save_state(dev->pdev);
 	if (state.event == PM_EVENT_SUSPEND) {
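The new suspend path only unpins a framebuffer object after successfully reserving it, and always drops the reservation again afterwards. A hedged, self-contained sketch of that discipline with illustrative stub types, not the driver's radeon_bo API:

#include <stdio.h>

struct bo { int reserved; int pinned; };

static int  bo_reserve(struct bo *bo)   { bo->reserved = 1; return 0; } /* 0 == success */
static void bo_unreserve(struct bo *bo) { bo->reserved = 0; }
static void bo_unpin(struct bo *bo)     { if (bo->reserved) bo->pinned = 0; }

static void suspend_unpin(struct bo *fb_bo)
{
        /* same shape as the patch: only unpin if the reservation succeeded,
         * and always release the reservation afterwards */
        if (bo_reserve(fb_bo) == 0) {
                bo_unpin(fb_bo);
                bo_unreserve(fb_bo);
        }
}

int main(void)
{
        struct bo fb = { 0, 1 };
        suspend_unpin(&fb);
        printf("pinned=%d reserved=%d\n", fb.pinned, fb.reserved); /* 0 0 */
        return 0;
}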
@@ -680,6 +764,9 @@ int radeon_resume_kms(struct drm_device *dev)
 {
 	struct radeon_device *rdev = dev->dev_private;
 
+	if (rdev->powered_down)
+		return 0;
+
 	acquire_console_sem();
 	pci_set_power_state(dev->pdev, PCI_D0);
 	pci_restore_state(dev->pdev);
@@ -695,6 +782,8 @@ int radeon_resume_kms(struct drm_device *dev)
 	fb_set_suspend(rdev->fbdev_info, 0);
 	release_console_sem();
 
+	/* reset hpd state */
+	radeon_hpd_init(rdev);
 	/* blat the mode back in */
 	drm_helper_resume_force_mode(dev);
 	return 0;