diff options
42 files changed, 2731 insertions, 1122 deletions
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c index c15287a590ff..fba3c96b915b 100644 --- a/drivers/gpu/drm/radeon/atombios_crtc.c +++ b/drivers/gpu/drm/radeon/atombios_crtc.c | |||
| @@ -241,6 +241,7 @@ void atombios_crtc_dpms(struct drm_crtc *crtc, int mode) | |||
| 241 | { | 241 | { |
| 242 | struct drm_device *dev = crtc->dev; | 242 | struct drm_device *dev = crtc->dev; |
| 243 | struct radeon_device *rdev = dev->dev_private; | 243 | struct radeon_device *rdev = dev->dev_private; |
| 244 | struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); | ||
| 244 | 245 | ||
| 245 | switch (mode) { | 246 | switch (mode) { |
| 246 | case DRM_MODE_DPMS_ON: | 247 | case DRM_MODE_DPMS_ON: |
| @@ -248,20 +249,19 @@ void atombios_crtc_dpms(struct drm_crtc *crtc, int mode) | |||
| 248 | if (ASIC_IS_DCE3(rdev)) | 249 | if (ASIC_IS_DCE3(rdev)) |
| 249 | atombios_enable_crtc_memreq(crtc, 1); | 250 | atombios_enable_crtc_memreq(crtc, 1); |
| 250 | atombios_blank_crtc(crtc, 0); | 251 | atombios_blank_crtc(crtc, 0); |
| 252 | drm_vblank_post_modeset(dev, radeon_crtc->crtc_id); | ||
| 253 | radeon_crtc_load_lut(crtc); | ||
| 251 | break; | 254 | break; |
| 252 | case DRM_MODE_DPMS_STANDBY: | 255 | case DRM_MODE_DPMS_STANDBY: |
| 253 | case DRM_MODE_DPMS_SUSPEND: | 256 | case DRM_MODE_DPMS_SUSPEND: |
| 254 | case DRM_MODE_DPMS_OFF: | 257 | case DRM_MODE_DPMS_OFF: |
| 258 | drm_vblank_pre_modeset(dev, radeon_crtc->crtc_id); | ||
| 255 | atombios_blank_crtc(crtc, 1); | 259 | atombios_blank_crtc(crtc, 1); |
| 256 | if (ASIC_IS_DCE3(rdev)) | 260 | if (ASIC_IS_DCE3(rdev)) |
| 257 | atombios_enable_crtc_memreq(crtc, 0); | 261 | atombios_enable_crtc_memreq(crtc, 0); |
| 258 | atombios_enable_crtc(crtc, 0); | 262 | atombios_enable_crtc(crtc, 0); |
| 259 | break; | 263 | break; |
| 260 | } | 264 | } |
| 261 | |||
| 262 | if (mode != DRM_MODE_DPMS_OFF) { | ||
| 263 | radeon_crtc_load_lut(crtc); | ||
| 264 | } | ||
| 265 | } | 265 | } |
| 266 | 266 | ||
| 267 | static void | 267 | static void |
| @@ -457,9 +457,8 @@ void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode) | |||
| 457 | if (encoder->encoder_type != | 457 | if (encoder->encoder_type != |
| 458 | DRM_MODE_ENCODER_DAC) | 458 | DRM_MODE_ENCODER_DAC) |
| 459 | pll_flags |= RADEON_PLL_NO_ODD_POST_DIV; | 459 | pll_flags |= RADEON_PLL_NO_ODD_POST_DIV; |
| 460 | if (!ASIC_IS_AVIVO(rdev) | 460 | if (encoder->encoder_type == |
| 461 | && (encoder->encoder_type == | 461 | DRM_MODE_ENCODER_LVDS) |
| 462 | DRM_MODE_ENCODER_LVDS)) | ||
| 463 | pll_flags |= RADEON_PLL_USE_REF_DIV; | 462 | pll_flags |= RADEON_PLL_USE_REF_DIV; |
| 464 | } | 463 | } |
| 465 | radeon_encoder = to_radeon_encoder(encoder); | 464 | radeon_encoder = to_radeon_encoder(encoder); |
| @@ -574,21 +573,34 @@ int atombios_crtc_set_base(struct drm_crtc *crtc, int x, int y, | |||
| 574 | struct radeon_device *rdev = dev->dev_private; | 573 | struct radeon_device *rdev = dev->dev_private; |
| 575 | struct radeon_framebuffer *radeon_fb; | 574 | struct radeon_framebuffer *radeon_fb; |
| 576 | struct drm_gem_object *obj; | 575 | struct drm_gem_object *obj; |
| 577 | struct drm_radeon_gem_object *obj_priv; | 576 | struct radeon_bo *rbo; |
| 578 | uint64_t fb_location; | 577 | uint64_t fb_location; |
| 579 | uint32_t fb_format, fb_pitch_pixels, tiling_flags; | 578 | uint32_t fb_format, fb_pitch_pixels, tiling_flags; |
| 579 | int r; | ||
| 580 | 580 | ||
| 581 | if (!crtc->fb) | 581 | /* no fb bound */ |
| 582 | return -EINVAL; | 582 | if (!crtc->fb) { |
| 583 | DRM_DEBUG("No FB bound\n"); | ||
| 584 | return 0; | ||
| 585 | } | ||
| 583 | 586 | ||
| 584 | radeon_fb = to_radeon_framebuffer(crtc->fb); | 587 | radeon_fb = to_radeon_framebuffer(crtc->fb); |
| 585 | 588 | ||
| 589 | /* Pin framebuffer & get tilling informations */ | ||
| 586 | obj = radeon_fb->obj; | 590 | obj = radeon_fb->obj; |
| 587 | obj_priv = obj->driver_private; | 591 | rbo = obj->driver_private; |
| 588 | 592 | r = radeon_bo_reserve(rbo, false); | |
| 589 | if (radeon_gem_object_pin(obj, RADEON_GEM_DOMAIN_VRAM, &fb_location)) { | 593 | if (unlikely(r != 0)) |
| 594 | return r; | ||
| 595 | r = radeon_bo_pin(rbo, RADEON_GEM_DOMAIN_VRAM, &fb_location); | ||
| 596 | if (unlikely(r != 0)) { | ||
| 597 | radeon_bo_unreserve(rbo); | ||
| 590 | return -EINVAL; | 598 | return -EINVAL; |
| 591 | } | 599 | } |
| 600 | radeon_bo_get_tiling_flags(rbo, &tiling_flags, NULL); | ||
| 601 | radeon_bo_unreserve(rbo); | ||
| 602 | if (tiling_flags & RADEON_TILING_MACRO) | ||
| 603 | fb_format |= AVIVO_D1GRPH_MACRO_ADDRESS_MODE; | ||
| 592 | 604 | ||
| 593 | switch (crtc->fb->bits_per_pixel) { | 605 | switch (crtc->fb->bits_per_pixel) { |
| 594 | case 8: | 606 | case 8: |
| @@ -618,11 +630,6 @@ int atombios_crtc_set_base(struct drm_crtc *crtc, int x, int y, | |||
| 618 | return -EINVAL; | 630 | return -EINVAL; |
| 619 | } | 631 | } |
| 620 | 632 | ||
| 621 | radeon_object_get_tiling_flags(obj->driver_private, | ||
| 622 | &tiling_flags, NULL); | ||
| 623 | if (tiling_flags & RADEON_TILING_MACRO) | ||
| 624 | fb_format |= AVIVO_D1GRPH_MACRO_ADDRESS_MODE; | ||
| 625 | |||
| 626 | if (tiling_flags & RADEON_TILING_MICRO) | 633 | if (tiling_flags & RADEON_TILING_MICRO) |
| 627 | fb_format |= AVIVO_D1GRPH_TILED; | 634 | fb_format |= AVIVO_D1GRPH_TILED; |
| 628 | 635 | ||
| @@ -674,7 +681,12 @@ int atombios_crtc_set_base(struct drm_crtc *crtc, int x, int y, | |||
| 674 | 681 | ||
| 675 | if (old_fb && old_fb != crtc->fb) { | 682 | if (old_fb && old_fb != crtc->fb) { |
| 676 | radeon_fb = to_radeon_framebuffer(old_fb); | 683 | radeon_fb = to_radeon_framebuffer(old_fb); |
| 677 | radeon_gem_object_unpin(radeon_fb->obj); | 684 | rbo = radeon_fb->obj->driver_private; |
| 685 | r = radeon_bo_reserve(rbo, false); | ||
| 686 | if (unlikely(r != 0)) | ||
| 687 | return r; | ||
| 688 | radeon_bo_unpin(rbo); | ||
| 689 | radeon_bo_unreserve(rbo); | ||
| 678 | } | 690 | } |
| 679 | 691 | ||
| 680 | /* Bytes per pixel may have changed */ | 692 | /* Bytes per pixel may have changed */ |
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c index c9e93eabcf16..109096802e66 100644 --- a/drivers/gpu/drm/radeon/r100.c +++ b/drivers/gpu/drm/radeon/r100.c | |||
| @@ -94,6 +94,15 @@ int r100_pci_gart_init(struct radeon_device *rdev) | |||
| 94 | return radeon_gart_table_ram_alloc(rdev); | 94 | return radeon_gart_table_ram_alloc(rdev); |
| 95 | } | 95 | } |
| 96 | 96 | ||
| 97 | /* required on r1xx, r2xx, r300, r(v)350, r420/r481, rs400/rs480 */ | ||
| 98 | void r100_enable_bm(struct radeon_device *rdev) | ||
| 99 | { | ||
| 100 | uint32_t tmp; | ||
| 101 | /* Enable bus mastering */ | ||
| 102 | tmp = RREG32(RADEON_BUS_CNTL) & ~RADEON_BUS_MASTER_DIS; | ||
| 103 | WREG32(RADEON_BUS_CNTL, tmp); | ||
| 104 | } | ||
| 105 | |||
| 97 | int r100_pci_gart_enable(struct radeon_device *rdev) | 106 | int r100_pci_gart_enable(struct radeon_device *rdev) |
| 98 | { | 107 | { |
| 99 | uint32_t tmp; | 108 | uint32_t tmp; |
| @@ -105,9 +114,6 @@ int r100_pci_gart_enable(struct radeon_device *rdev) | |||
| 105 | WREG32(RADEON_AIC_LO_ADDR, rdev->mc.gtt_location); | 114 | WREG32(RADEON_AIC_LO_ADDR, rdev->mc.gtt_location); |
| 106 | tmp = rdev->mc.gtt_location + rdev->mc.gtt_size - 1; | 115 | tmp = rdev->mc.gtt_location + rdev->mc.gtt_size - 1; |
| 107 | WREG32(RADEON_AIC_HI_ADDR, tmp); | 116 | WREG32(RADEON_AIC_HI_ADDR, tmp); |
| 108 | /* Enable bus mastering */ | ||
| 109 | tmp = RREG32(RADEON_BUS_CNTL) & ~RADEON_BUS_MASTER_DIS; | ||
| 110 | WREG32(RADEON_BUS_CNTL, tmp); | ||
| 111 | /* set PCI GART page-table base address */ | 117 | /* set PCI GART page-table base address */ |
| 112 | WREG32(RADEON_AIC_PT_BASE, rdev->gart.table_addr); | 118 | WREG32(RADEON_AIC_PT_BASE, rdev->gart.table_addr); |
| 113 | tmp = RREG32(RADEON_AIC_CNTL) | RADEON_PCIGART_TRANSLATE_EN; | 119 | tmp = RREG32(RADEON_AIC_CNTL) | RADEON_PCIGART_TRANSLATE_EN; |
| @@ -255,24 +261,27 @@ int r100_wb_init(struct radeon_device *rdev) | |||
| 255 | int r; | 261 | int r; |
| 256 | 262 | ||
| 257 | if (rdev->wb.wb_obj == NULL) { | 263 | if (rdev->wb.wb_obj == NULL) { |
| 258 | r = radeon_object_create(rdev, NULL, RADEON_GPU_PAGE_SIZE, | 264 | r = radeon_bo_create(rdev, NULL, RADEON_GPU_PAGE_SIZE, true, |
| 259 | true, | 265 | RADEON_GEM_DOMAIN_GTT, |
| 260 | RADEON_GEM_DOMAIN_GTT, | 266 | &rdev->wb.wb_obj); |
| 261 | false, &rdev->wb.wb_obj); | ||
| 262 | if (r) { | 267 | if (r) { |
| 263 | DRM_ERROR("radeon: failed to create WB buffer (%d).\n", r); | 268 | dev_err(rdev->dev, "(%d) create WB buffer failed\n", r); |
| 264 | return r; | 269 | return r; |
| 265 | } | 270 | } |
| 266 | r = radeon_object_pin(rdev->wb.wb_obj, | 271 | r = radeon_bo_reserve(rdev->wb.wb_obj, false); |
| 267 | RADEON_GEM_DOMAIN_GTT, | 272 | if (unlikely(r != 0)) |
| 268 | &rdev->wb.gpu_addr); | 273 | return r; |
| 274 | r = radeon_bo_pin(rdev->wb.wb_obj, RADEON_GEM_DOMAIN_GTT, | ||
| 275 | &rdev->wb.gpu_addr); | ||
| 269 | if (r) { | 276 | if (r) { |
| 270 | DRM_ERROR("radeon: failed to pin WB buffer (%d).\n", r); | 277 | dev_err(rdev->dev, "(%d) pin WB buffer failed\n", r); |
| 278 | radeon_bo_unreserve(rdev->wb.wb_obj); | ||
| 271 | return r; | 279 | return r; |
| 272 | } | 280 | } |
| 273 | r = radeon_object_kmap(rdev->wb.wb_obj, (void **)&rdev->wb.wb); | 281 | r = radeon_bo_kmap(rdev->wb.wb_obj, (void **)&rdev->wb.wb); |
| 282 | radeon_bo_unreserve(rdev->wb.wb_obj); | ||
| 274 | if (r) { | 283 | if (r) { |
| 275 | DRM_ERROR("radeon: failed to map WB buffer (%d).\n", r); | 284 | dev_err(rdev->dev, "(%d) map WB buffer failed\n", r); |
| 276 | return r; | 285 | return r; |
| 277 | } | 286 | } |
| 278 | } | 287 | } |
| @@ -290,11 +299,19 @@ void r100_wb_disable(struct radeon_device *rdev) | |||
| 290 | 299 | ||
| 291 | void r100_wb_fini(struct radeon_device *rdev) | 300 | void r100_wb_fini(struct radeon_device *rdev) |
| 292 | { | 301 | { |
| 302 | int r; | ||
| 303 | |||
| 293 | r100_wb_disable(rdev); | 304 | r100_wb_disable(rdev); |
| 294 | if (rdev->wb.wb_obj) { | 305 | if (rdev->wb.wb_obj) { |
| 295 | radeon_object_kunmap(rdev->wb.wb_obj); | 306 | r = radeon_bo_reserve(rdev->wb.wb_obj, false); |
| 296 | radeon_object_unpin(rdev->wb.wb_obj); | 307 | if (unlikely(r != 0)) { |
| 297 | radeon_object_unref(&rdev->wb.wb_obj); | 308 | dev_err(rdev->dev, "(%d) can't finish WB\n", r); |
| 309 | return; | ||
| 310 | } | ||
| 311 | radeon_bo_kunmap(rdev->wb.wb_obj); | ||
| 312 | radeon_bo_unpin(rdev->wb.wb_obj); | ||
| 313 | radeon_bo_unreserve(rdev->wb.wb_obj); | ||
| 314 | radeon_bo_unref(&rdev->wb.wb_obj); | ||
| 298 | rdev->wb.wb = NULL; | 315 | rdev->wb.wb = NULL; |
| 299 | rdev->wb.wb_obj = NULL; | 316 | rdev->wb.wb_obj = NULL; |
| 300 | } | 317 | } |
| @@ -1288,17 +1305,17 @@ static int r100_packet0_check(struct radeon_cs_parser *p, | |||
| 1288 | 1305 | ||
| 1289 | int r100_cs_track_check_pkt3_indx_buffer(struct radeon_cs_parser *p, | 1306 | int r100_cs_track_check_pkt3_indx_buffer(struct radeon_cs_parser *p, |
| 1290 | struct radeon_cs_packet *pkt, | 1307 | struct radeon_cs_packet *pkt, |
| 1291 | struct radeon_object *robj) | 1308 | struct radeon_bo *robj) |
| 1292 | { | 1309 | { |
| 1293 | unsigned idx; | 1310 | unsigned idx; |
| 1294 | u32 value; | 1311 | u32 value; |
| 1295 | idx = pkt->idx + 1; | 1312 | idx = pkt->idx + 1; |
| 1296 | value = radeon_get_ib_value(p, idx + 2); | 1313 | value = radeon_get_ib_value(p, idx + 2); |
| 1297 | if ((value + 1) > radeon_object_size(robj)) { | 1314 | if ((value + 1) > radeon_bo_size(robj)) { |
| 1298 | DRM_ERROR("[drm] Buffer too small for PACKET3 INDX_BUFFER " | 1315 | DRM_ERROR("[drm] Buffer too small for PACKET3 INDX_BUFFER " |
| 1299 | "(need %u have %lu) !\n", | 1316 | "(need %u have %lu) !\n", |
| 1300 | value + 1, | 1317 | value + 1, |
| 1301 | radeon_object_size(robj)); | 1318 | radeon_bo_size(robj)); |
| 1302 | return -EINVAL; | 1319 | return -EINVAL; |
| 1303 | } | 1320 | } |
| 1304 | return 0; | 1321 | return 0; |
| @@ -1583,6 +1600,14 @@ void r100_gpu_init(struct radeon_device *rdev) | |||
| 1583 | r100_hdp_reset(rdev); | 1600 | r100_hdp_reset(rdev); |
| 1584 | } | 1601 | } |
| 1585 | 1602 | ||
| 1603 | void r100_hdp_flush(struct radeon_device *rdev) | ||
| 1604 | { | ||
| 1605 | u32 tmp; | ||
| 1606 | tmp = RREG32(RADEON_HOST_PATH_CNTL); | ||
| 1607 | tmp |= RADEON_HDP_READ_BUFFER_INVALIDATE; | ||
| 1608 | WREG32(RADEON_HOST_PATH_CNTL, tmp); | ||
| 1609 | } | ||
| 1610 | |||
| 1586 | void r100_hdp_reset(struct radeon_device *rdev) | 1611 | void r100_hdp_reset(struct radeon_device *rdev) |
| 1587 | { | 1612 | { |
| 1588 | uint32_t tmp; | 1613 | uint32_t tmp; |
| @@ -1650,6 +1675,17 @@ int r100_gpu_reset(struct radeon_device *rdev) | |||
| 1650 | return 0; | 1675 | return 0; |
| 1651 | } | 1676 | } |
| 1652 | 1677 | ||
| 1678 | void r100_set_common_regs(struct radeon_device *rdev) | ||
| 1679 | { | ||
| 1680 | /* set these so they don't interfere with anything */ | ||
| 1681 | WREG32(RADEON_OV0_SCALE_CNTL, 0); | ||
| 1682 | WREG32(RADEON_SUBPIC_CNTL, 0); | ||
| 1683 | WREG32(RADEON_VIPH_CONTROL, 0); | ||
| 1684 | WREG32(RADEON_I2C_CNTL_1, 0); | ||
| 1685 | WREG32(RADEON_DVI_I2C_CNTL_1, 0); | ||
| 1686 | WREG32(RADEON_CAP0_TRIG_CNTL, 0); | ||
| 1687 | WREG32(RADEON_CAP1_TRIG_CNTL, 0); | ||
| 1688 | } | ||
| 1653 | 1689 | ||
| 1654 | /* | 1690 | /* |
| 1655 | * VRAM info | 1691 | * VRAM info |
| @@ -2594,7 +2630,7 @@ static int r100_cs_track_cube(struct radeon_device *rdev, | |||
| 2594 | struct r100_cs_track *track, unsigned idx) | 2630 | struct r100_cs_track *track, unsigned idx) |
| 2595 | { | 2631 | { |
| 2596 | unsigned face, w, h; | 2632 | unsigned face, w, h; |
| 2597 | struct radeon_object *cube_robj; | 2633 | struct radeon_bo *cube_robj; |
| 2598 | unsigned long size; | 2634 | unsigned long size; |
| 2599 | 2635 | ||
| 2600 | for (face = 0; face < 5; face++) { | 2636 | for (face = 0; face < 5; face++) { |
| @@ -2607,9 +2643,9 @@ static int r100_cs_track_cube(struct radeon_device *rdev, | |||
| 2607 | 2643 | ||
| 2608 | size += track->textures[idx].cube_info[face].offset; | 2644 | size += track->textures[idx].cube_info[face].offset; |
| 2609 | 2645 | ||
| 2610 | if (size > radeon_object_size(cube_robj)) { | 2646 | if (size > radeon_bo_size(cube_robj)) { |
| 2611 | DRM_ERROR("Cube texture offset greater than object size %lu %lu\n", | 2647 | DRM_ERROR("Cube texture offset greater than object size %lu %lu\n", |
| 2612 | size, radeon_object_size(cube_robj)); | 2648 | size, radeon_bo_size(cube_robj)); |
| 2613 | r100_cs_track_texture_print(&track->textures[idx]); | 2649 | r100_cs_track_texture_print(&track->textures[idx]); |
| 2614 | return -1; | 2650 | return -1; |
| 2615 | } | 2651 | } |
| @@ -2620,7 +2656,7 @@ static int r100_cs_track_cube(struct radeon_device *rdev, | |||
| 2620 | static int r100_cs_track_texture_check(struct radeon_device *rdev, | 2656 | static int r100_cs_track_texture_check(struct radeon_device *rdev, |
| 2621 | struct r100_cs_track *track) | 2657 | struct r100_cs_track *track) |
| 2622 | { | 2658 | { |
| 2623 | struct radeon_object *robj; | 2659 | struct radeon_bo *robj; |
| 2624 | unsigned long size; | 2660 | unsigned long size; |
| 2625 | unsigned u, i, w, h; | 2661 | unsigned u, i, w, h; |
| 2626 | int ret; | 2662 | int ret; |
| @@ -2676,9 +2712,9 @@ static int r100_cs_track_texture_check(struct radeon_device *rdev, | |||
| 2676 | "%u\n", track->textures[u].tex_coord_type, u); | 2712 | "%u\n", track->textures[u].tex_coord_type, u); |
| 2677 | return -EINVAL; | 2713 | return -EINVAL; |
| 2678 | } | 2714 | } |
| 2679 | if (size > radeon_object_size(robj)) { | 2715 | if (size > radeon_bo_size(robj)) { |
| 2680 | DRM_ERROR("Texture of unit %u needs %lu bytes but is " | 2716 | DRM_ERROR("Texture of unit %u needs %lu bytes but is " |
| 2681 | "%lu\n", u, size, radeon_object_size(robj)); | 2717 | "%lu\n", u, size, radeon_bo_size(robj)); |
| 2682 | r100_cs_track_texture_print(&track->textures[u]); | 2718 | r100_cs_track_texture_print(&track->textures[u]); |
| 2683 | return -EINVAL; | 2719 | return -EINVAL; |
| 2684 | } | 2720 | } |
| @@ -2700,10 +2736,10 @@ int r100_cs_track_check(struct radeon_device *rdev, struct r100_cs_track *track) | |||
| 2700 | } | 2736 | } |
| 2701 | size = track->cb[i].pitch * track->cb[i].cpp * track->maxy; | 2737 | size = track->cb[i].pitch * track->cb[i].cpp * track->maxy; |
| 2702 | size += track->cb[i].offset; | 2738 | size += track->cb[i].offset; |
| 2703 | if (size > radeon_object_size(track->cb[i].robj)) { | 2739 | if (size > radeon_bo_size(track->cb[i].robj)) { |
| 2704 | DRM_ERROR("[drm] Buffer too small for color buffer %d " | 2740 | DRM_ERROR("[drm] Buffer too small for color buffer %d " |
| 2705 | "(need %lu have %lu) !\n", i, size, | 2741 | "(need %lu have %lu) !\n", i, size, |
| 2706 | radeon_object_size(track->cb[i].robj)); | 2742 | radeon_bo_size(track->cb[i].robj)); |
| 2707 | DRM_ERROR("[drm] color buffer %d (%u %u %u %u)\n", | 2743 | DRM_ERROR("[drm] color buffer %d (%u %u %u %u)\n", |
| 2708 | i, track->cb[i].pitch, track->cb[i].cpp, | 2744 | i, track->cb[i].pitch, track->cb[i].cpp, |
| 2709 | track->cb[i].offset, track->maxy); | 2745 | track->cb[i].offset, track->maxy); |
| @@ -2717,10 +2753,10 @@ int r100_cs_track_check(struct radeon_device *rdev, struct r100_cs_track *track) | |||
| 2717 | } | 2753 | } |
| 2718 | size = track->zb.pitch * track->zb.cpp * track->maxy; | 2754 | size = track->zb.pitch * track->zb.cpp * track->maxy; |
| 2719 | size += track->zb.offset; | 2755 | size += track->zb.offset; |
| 2720 | if (size > radeon_object_size(track->zb.robj)) { | 2756 | if (size > radeon_bo_size(track->zb.robj)) { |
| 2721 | DRM_ERROR("[drm] Buffer too small for z buffer " | 2757 | DRM_ERROR("[drm] Buffer too small for z buffer " |
| 2722 | "(need %lu have %lu) !\n", size, | 2758 | "(need %lu have %lu) !\n", size, |
| 2723 | radeon_object_size(track->zb.robj)); | 2759 | radeon_bo_size(track->zb.robj)); |
| 2724 | DRM_ERROR("[drm] zbuffer (%u %u %u %u)\n", | 2760 | DRM_ERROR("[drm] zbuffer (%u %u %u %u)\n", |
| 2725 | track->zb.pitch, track->zb.cpp, | 2761 | track->zb.pitch, track->zb.cpp, |
| 2726 | track->zb.offset, track->maxy); | 2762 | track->zb.offset, track->maxy); |
| @@ -2738,11 +2774,12 @@ int r100_cs_track_check(struct radeon_device *rdev, struct r100_cs_track *track) | |||
| 2738 | "bound\n", prim_walk, i); | 2774 | "bound\n", prim_walk, i); |
| 2739 | return -EINVAL; | 2775 | return -EINVAL; |
| 2740 | } | 2776 | } |
| 2741 | if (size > radeon_object_size(track->arrays[i].robj)) { | 2777 | if (size > radeon_bo_size(track->arrays[i].robj)) { |
| 2742 | DRM_ERROR("(PW %u) Vertex array %u need %lu dwords " | 2778 | dev_err(rdev->dev, "(PW %u) Vertex array %u " |
| 2743 | "have %lu dwords\n", prim_walk, i, | 2779 | "need %lu dwords have %lu dwords\n", |
| 2744 | size >> 2, | 2780 | prim_walk, i, size >> 2, |
| 2745 | radeon_object_size(track->arrays[i].robj) >> 2); | 2781 | radeon_bo_size(track->arrays[i].robj) |
| 2782 | >> 2); | ||
| 2746 | DRM_ERROR("Max indices %u\n", track->max_indx); | 2783 | DRM_ERROR("Max indices %u\n", track->max_indx); |
| 2747 | return -EINVAL; | 2784 | return -EINVAL; |
| 2748 | } | 2785 | } |
| @@ -2756,10 +2793,12 @@ int r100_cs_track_check(struct radeon_device *rdev, struct r100_cs_track *track) | |||
| 2756 | "bound\n", prim_walk, i); | 2793 | "bound\n", prim_walk, i); |
| 2757 | return -EINVAL; | 2794 | return -EINVAL; |
| 2758 | } | 2795 | } |
| 2759 | if (size > radeon_object_size(track->arrays[i].robj)) { | 2796 | if (size > radeon_bo_size(track->arrays[i].robj)) { |
| 2760 | DRM_ERROR("(PW %u) Vertex array %u need %lu dwords " | 2797 | dev_err(rdev->dev, "(PW %u) Vertex array %u " |
| 2761 | "have %lu dwords\n", prim_walk, i, size >> 2, | 2798 | "need %lu dwords have %lu dwords\n", |
| 2762 | radeon_object_size(track->arrays[i].robj) >> 2); | 2799 | prim_walk, i, size >> 2, |
| 2800 | radeon_bo_size(track->arrays[i].robj) | ||
| 2801 | >> 2); | ||
| 2763 | return -EINVAL; | 2802 | return -EINVAL; |
| 2764 | } | 2803 | } |
| 2765 | } | 2804 | } |
| @@ -3101,6 +3140,9 @@ static int r100_startup(struct radeon_device *rdev) | |||
| 3101 | { | 3140 | { |
| 3102 | int r; | 3141 | int r; |
| 3103 | 3142 | ||
| 3143 | /* set common regs */ | ||
| 3144 | r100_set_common_regs(rdev); | ||
| 3145 | /* program mc */ | ||
| 3104 | r100_mc_program(rdev); | 3146 | r100_mc_program(rdev); |
| 3105 | /* Resume clock */ | 3147 | /* Resume clock */ |
| 3106 | r100_clock_startup(rdev); | 3148 | r100_clock_startup(rdev); |
| @@ -3108,13 +3150,13 @@ static int r100_startup(struct radeon_device *rdev) | |||
| 3108 | r100_gpu_init(rdev); | 3150 | r100_gpu_init(rdev); |
| 3109 | /* Initialize GART (initialize after TTM so we can allocate | 3151 | /* Initialize GART (initialize after TTM so we can allocate |
| 3110 | * memory through TTM but finalize after TTM) */ | 3152 | * memory through TTM but finalize after TTM) */ |
| 3153 | r100_enable_bm(rdev); | ||
| 3111 | if (rdev->flags & RADEON_IS_PCI) { | 3154 | if (rdev->flags & RADEON_IS_PCI) { |
| 3112 | r = r100_pci_gart_enable(rdev); | 3155 | r = r100_pci_gart_enable(rdev); |
| 3113 | if (r) | 3156 | if (r) |
| 3114 | return r; | 3157 | return r; |
| 3115 | } | 3158 | } |
| 3116 | /* Enable IRQ */ | 3159 | /* Enable IRQ */ |
| 3117 | rdev->irq.sw_int = true; | ||
| 3118 | r100_irq_set(rdev); | 3160 | r100_irq_set(rdev); |
| 3119 | /* 1M ring buffer */ | 3161 | /* 1M ring buffer */ |
| 3120 | r = r100_cp_init(rdev, 1024 * 1024); | 3162 | r = r100_cp_init(rdev, 1024 * 1024); |
| @@ -3174,7 +3216,7 @@ void r100_fini(struct radeon_device *rdev) | |||
| 3174 | r100_pci_gart_fini(rdev); | 3216 | r100_pci_gart_fini(rdev); |
| 3175 | radeon_irq_kms_fini(rdev); | 3217 | radeon_irq_kms_fini(rdev); |
| 3176 | radeon_fence_driver_fini(rdev); | 3218 | radeon_fence_driver_fini(rdev); |
| 3177 | radeon_object_fini(rdev); | 3219 | radeon_bo_fini(rdev); |
| 3178 | radeon_atombios_fini(rdev); | 3220 | radeon_atombios_fini(rdev); |
| 3179 | kfree(rdev->bios); | 3221 | kfree(rdev->bios); |
| 3180 | rdev->bios = NULL; | 3222 | rdev->bios = NULL; |
| @@ -3242,10 +3284,8 @@ int r100_init(struct radeon_device *rdev) | |||
| 3242 | RREG32(R_0007C0_CP_STAT)); | 3284 | RREG32(R_0007C0_CP_STAT)); |
| 3243 | } | 3285 | } |
| 3244 | /* check if cards are posted or not */ | 3286 | /* check if cards are posted or not */ |
| 3245 | if (!radeon_card_posted(rdev) && rdev->bios) { | 3287 | if (radeon_boot_test_post_card(rdev) == false) |
| 3246 | DRM_INFO("GPU not posted. posting now...\n"); | 3288 | return -EINVAL; |
| 3247 | radeon_combios_asic_init(rdev->ddev); | ||
| 3248 | } | ||
| 3249 | /* Set asic errata */ | 3289 | /* Set asic errata */ |
| 3250 | r100_errata(rdev); | 3290 | r100_errata(rdev); |
| 3251 | /* Initialize clocks */ | 3291 | /* Initialize clocks */ |
| @@ -3264,7 +3304,7 @@ int r100_init(struct radeon_device *rdev) | |||
| 3264 | if (r) | 3304 | if (r) |
| 3265 | return r; | 3305 | return r; |
| 3266 | /* Memory manager */ | 3306 | /* Memory manager */ |
| 3267 | r = radeon_object_init(rdev); | 3307 | r = radeon_bo_init(rdev); |
| 3268 | if (r) | 3308 | if (r) |
| 3269 | return r; | 3309 | return r; |
| 3270 | if (rdev->flags & RADEON_IS_PCI) { | 3310 | if (rdev->flags & RADEON_IS_PCI) { |
diff --git a/drivers/gpu/drm/radeon/r100_track.h b/drivers/gpu/drm/radeon/r100_track.h index 0daf0d76a891..ca50903dd2bb 100644 --- a/drivers/gpu/drm/radeon/r100_track.h +++ b/drivers/gpu/drm/radeon/r100_track.h | |||
| @@ -10,26 +10,26 @@ | |||
| 10 | * CS functions | 10 | * CS functions |
| 11 | */ | 11 | */ |
| 12 | struct r100_cs_track_cb { | 12 | struct r100_cs_track_cb { |
| 13 | struct radeon_object *robj; | 13 | struct radeon_bo *robj; |
| 14 | unsigned pitch; | 14 | unsigned pitch; |
| 15 | unsigned cpp; | 15 | unsigned cpp; |
| 16 | unsigned offset; | 16 | unsigned offset; |
| 17 | }; | 17 | }; |
| 18 | 18 | ||
| 19 | struct r100_cs_track_array { | 19 | struct r100_cs_track_array { |
| 20 | struct radeon_object *robj; | 20 | struct radeon_bo *robj; |
| 21 | unsigned esize; | 21 | unsigned esize; |
| 22 | }; | 22 | }; |
| 23 | 23 | ||
| 24 | struct r100_cs_cube_info { | 24 | struct r100_cs_cube_info { |
| 25 | struct radeon_object *robj; | 25 | struct radeon_bo *robj; |
| 26 | unsigned offset; | 26 | unsigned offset; |
| 27 | unsigned width; | 27 | unsigned width; |
| 28 | unsigned height; | 28 | unsigned height; |
| 29 | }; | 29 | }; |
| 30 | 30 | ||
| 31 | struct r100_cs_track_texture { | 31 | struct r100_cs_track_texture { |
| 32 | struct radeon_object *robj; | 32 | struct radeon_bo *robj; |
| 33 | struct r100_cs_cube_info cube_info[5]; /* info for 5 non-primary faces */ | 33 | struct r100_cs_cube_info cube_info[5]; /* info for 5 non-primary faces */ |
| 34 | unsigned pitch; | 34 | unsigned pitch; |
| 35 | unsigned width; | 35 | unsigned width; |
diff --git a/drivers/gpu/drm/radeon/r300.c b/drivers/gpu/drm/radeon/r300.c index 2f43ee8e4048..86065dcc1982 100644 --- a/drivers/gpu/drm/radeon/r300.c +++ b/drivers/gpu/drm/radeon/r300.c | |||
| @@ -137,14 +137,19 @@ int rv370_pcie_gart_enable(struct radeon_device *rdev) | |||
| 137 | 137 | ||
| 138 | void rv370_pcie_gart_disable(struct radeon_device *rdev) | 138 | void rv370_pcie_gart_disable(struct radeon_device *rdev) |
| 139 | { | 139 | { |
| 140 | uint32_t tmp; | 140 | u32 tmp; |
| 141 | int r; | ||
| 141 | 142 | ||
| 142 | tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_CNTL); | 143 | tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_CNTL); |
| 143 | tmp |= RADEON_PCIE_TX_GART_UNMAPPED_ACCESS_DISCARD; | 144 | tmp |= RADEON_PCIE_TX_GART_UNMAPPED_ACCESS_DISCARD; |
| 144 | WREG32_PCIE(RADEON_PCIE_TX_GART_CNTL, tmp & ~RADEON_PCIE_TX_GART_EN); | 145 | WREG32_PCIE(RADEON_PCIE_TX_GART_CNTL, tmp & ~RADEON_PCIE_TX_GART_EN); |
| 145 | if (rdev->gart.table.vram.robj) { | 146 | if (rdev->gart.table.vram.robj) { |
| 146 | radeon_object_kunmap(rdev->gart.table.vram.robj); | 147 | r = radeon_bo_reserve(rdev->gart.table.vram.robj, false); |
| 147 | radeon_object_unpin(rdev->gart.table.vram.robj); | 148 | if (likely(r == 0)) { |
| 149 | radeon_bo_kunmap(rdev->gart.table.vram.robj); | ||
| 150 | radeon_bo_unpin(rdev->gart.table.vram.robj); | ||
| 151 | radeon_bo_unreserve(rdev->gart.table.vram.robj); | ||
| 152 | } | ||
| 148 | } | 153 | } |
| 149 | } | 154 | } |
| 150 | 155 | ||
| @@ -1181,6 +1186,9 @@ static int r300_startup(struct radeon_device *rdev) | |||
| 1181 | { | 1186 | { |
| 1182 | int r; | 1187 | int r; |
| 1183 | 1188 | ||
| 1189 | /* set common regs */ | ||
| 1190 | r100_set_common_regs(rdev); | ||
| 1191 | /* program mc */ | ||
| 1184 | r300_mc_program(rdev); | 1192 | r300_mc_program(rdev); |
| 1185 | /* Resume clock */ | 1193 | /* Resume clock */ |
| 1186 | r300_clock_startup(rdev); | 1194 | r300_clock_startup(rdev); |
| @@ -1193,13 +1201,18 @@ static int r300_startup(struct radeon_device *rdev) | |||
| 1193 | if (r) | 1201 | if (r) |
| 1194 | return r; | 1202 | return r; |
| 1195 | } | 1203 | } |
| 1204 | |||
| 1205 | if (rdev->family == CHIP_R300 || | ||
| 1206 | rdev->family == CHIP_R350 || | ||
| 1207 | rdev->family == CHIP_RV350) | ||
| 1208 | r100_enable_bm(rdev); | ||
| 1209 | |||
| 1196 | if (rdev->flags & RADEON_IS_PCI) { | 1210 | if (rdev->flags & RADEON_IS_PCI) { |
| 1197 | r = r100_pci_gart_enable(rdev); | 1211 | r = r100_pci_gart_enable(rdev); |
| 1198 | if (r) | 1212 | if (r) |
| 1199 | return r; | 1213 | return r; |
| 1200 | } | 1214 | } |
| 1201 | /* Enable IRQ */ | 1215 | /* Enable IRQ */ |
| 1202 | rdev->irq.sw_int = true; | ||
| 1203 | r100_irq_set(rdev); | 1216 | r100_irq_set(rdev); |
| 1204 | /* 1M ring buffer */ | 1217 | /* 1M ring buffer */ |
| 1205 | r = r100_cp_init(rdev, 1024 * 1024); | 1218 | r = r100_cp_init(rdev, 1024 * 1024); |
| @@ -1265,7 +1278,7 @@ void r300_fini(struct radeon_device *rdev) | |||
| 1265 | r100_pci_gart_fini(rdev); | 1278 | r100_pci_gart_fini(rdev); |
| 1266 | radeon_irq_kms_fini(rdev); | 1279 | radeon_irq_kms_fini(rdev); |
| 1267 | radeon_fence_driver_fini(rdev); | 1280 | radeon_fence_driver_fini(rdev); |
| 1268 | radeon_object_fini(rdev); | 1281 | radeon_bo_fini(rdev); |
| 1269 | radeon_atombios_fini(rdev); | 1282 | radeon_atombios_fini(rdev); |
| 1270 | kfree(rdev->bios); | 1283 | kfree(rdev->bios); |
| 1271 | rdev->bios = NULL; | 1284 | rdev->bios = NULL; |
| @@ -1303,10 +1316,8 @@ int r300_init(struct radeon_device *rdev) | |||
| 1303 | RREG32(R_0007C0_CP_STAT)); | 1316 | RREG32(R_0007C0_CP_STAT)); |
| 1304 | } | 1317 | } |
| 1305 | /* check if cards are posted or not */ | 1318 | /* check if cards are posted or not */ |
| 1306 | if (!radeon_card_posted(rdev) && rdev->bios) { | 1319 | if (radeon_boot_test_post_card(rdev) == false) |
| 1307 | DRM_INFO("GPU not posted. posting now...\n"); | 1320 | return -EINVAL; |
| 1308 | radeon_combios_asic_init(rdev->ddev); | ||
| 1309 | } | ||
| 1310 | /* Set asic errata */ | 1321 | /* Set asic errata */ |
| 1311 | r300_errata(rdev); | 1322 | r300_errata(rdev); |
| 1312 | /* Initialize clocks */ | 1323 | /* Initialize clocks */ |
| @@ -1325,7 +1336,7 @@ int r300_init(struct radeon_device *rdev) | |||
| 1325 | if (r) | 1336 | if (r) |
| 1326 | return r; | 1337 | return r; |
| 1327 | /* Memory manager */ | 1338 | /* Memory manager */ |
| 1328 | r = radeon_object_init(rdev); | 1339 | r = radeon_bo_init(rdev); |
| 1329 | if (r) | 1340 | if (r) |
| 1330 | return r; | 1341 | return r; |
| 1331 | if (rdev->flags & RADEON_IS_PCIE) { | 1342 | if (rdev->flags & RADEON_IS_PCIE) { |
diff --git a/drivers/gpu/drm/radeon/r420.c b/drivers/gpu/drm/radeon/r420.c index 1cefdbcc0850..162c3902fe69 100644 --- a/drivers/gpu/drm/radeon/r420.c +++ b/drivers/gpu/drm/radeon/r420.c | |||
| @@ -169,6 +169,9 @@ static int r420_startup(struct radeon_device *rdev) | |||
| 169 | { | 169 | { |
| 170 | int r; | 170 | int r; |
| 171 | 171 | ||
| 172 | /* set common regs */ | ||
| 173 | r100_set_common_regs(rdev); | ||
| 174 | /* program mc */ | ||
| 172 | r300_mc_program(rdev); | 175 | r300_mc_program(rdev); |
| 173 | /* Resume clock */ | 176 | /* Resume clock */ |
| 174 | r420_clock_resume(rdev); | 177 | r420_clock_resume(rdev); |
| @@ -186,7 +189,6 @@ static int r420_startup(struct radeon_device *rdev) | |||
| 186 | } | 189 | } |
| 187 | r420_pipes_init(rdev); | 190 | r420_pipes_init(rdev); |
| 188 | /* Enable IRQ */ | 191 | /* Enable IRQ */ |
| 189 | rdev->irq.sw_int = true; | ||
| 190 | r100_irq_set(rdev); | 192 | r100_irq_set(rdev); |
| 191 | /* 1M ring buffer */ | 193 | /* 1M ring buffer */ |
| 192 | r = r100_cp_init(rdev, 1024 * 1024); | 194 | r = r100_cp_init(rdev, 1024 * 1024); |
| @@ -258,7 +260,7 @@ void r420_fini(struct radeon_device *rdev) | |||
| 258 | radeon_agp_fini(rdev); | 260 | radeon_agp_fini(rdev); |
| 259 | radeon_irq_kms_fini(rdev); | 261 | radeon_irq_kms_fini(rdev); |
| 260 | radeon_fence_driver_fini(rdev); | 262 | radeon_fence_driver_fini(rdev); |
| 261 | radeon_object_fini(rdev); | 263 | radeon_bo_fini(rdev); |
| 262 | if (rdev->is_atom_bios) { | 264 | if (rdev->is_atom_bios) { |
| 263 | radeon_atombios_fini(rdev); | 265 | radeon_atombios_fini(rdev); |
| 264 | } else { | 266 | } else { |
| @@ -301,14 +303,9 @@ int r420_init(struct radeon_device *rdev) | |||
| 301 | RREG32(R_0007C0_CP_STAT)); | 303 | RREG32(R_0007C0_CP_STAT)); |
| 302 | } | 304 | } |
| 303 | /* check if cards are posted or not */ | 305 | /* check if cards are posted or not */ |
| 304 | if (!radeon_card_posted(rdev) && rdev->bios) { | 306 | if (radeon_boot_test_post_card(rdev) == false) |
| 305 | DRM_INFO("GPU not posted. posting now...\n"); | 307 | return -EINVAL; |
| 306 | if (rdev->is_atom_bios) { | 308 | |
| 307 | atom_asic_init(rdev->mode_info.atom_context); | ||
| 308 | } else { | ||
| 309 | radeon_combios_asic_init(rdev->ddev); | ||
| 310 | } | ||
| 311 | } | ||
| 312 | /* Initialize clocks */ | 309 | /* Initialize clocks */ |
| 313 | radeon_get_clock_info(rdev->ddev); | 310 | radeon_get_clock_info(rdev->ddev); |
| 314 | /* Initialize power management */ | 311 | /* Initialize power management */ |
| @@ -331,10 +328,13 @@ int r420_init(struct radeon_device *rdev) | |||
| 331 | return r; | 328 | return r; |
| 332 | } | 329 | } |
| 333 | /* Memory manager */ | 330 | /* Memory manager */ |
| 334 | r = radeon_object_init(rdev); | 331 | r = radeon_bo_init(rdev); |
| 335 | if (r) { | 332 | if (r) { |
| 336 | return r; | 333 | return r; |
| 337 | } | 334 | } |
| 335 | if (rdev->family == CHIP_R420) | ||
| 336 | r100_enable_bm(rdev); | ||
| 337 | |||
| 338 | if (rdev->flags & RADEON_IS_PCIE) { | 338 | if (rdev->flags & RADEON_IS_PCIE) { |
| 339 | r = rv370_pcie_gart_init(rdev); | 339 | r = rv370_pcie_gart_init(rdev); |
| 340 | if (r) | 340 | if (r) |
diff --git a/drivers/gpu/drm/radeon/r520.c b/drivers/gpu/drm/radeon/r520.c index f7435185c0a6..788eef5c2a08 100644 --- a/drivers/gpu/drm/radeon/r520.c +++ b/drivers/gpu/drm/radeon/r520.c | |||
| @@ -185,7 +185,6 @@ static int r520_startup(struct radeon_device *rdev) | |||
| 185 | return r; | 185 | return r; |
| 186 | } | 186 | } |
| 187 | /* Enable IRQ */ | 187 | /* Enable IRQ */ |
| 188 | rdev->irq.sw_int = true; | ||
| 189 | rs600_irq_set(rdev); | 188 | rs600_irq_set(rdev); |
| 190 | /* 1M ring buffer */ | 189 | /* 1M ring buffer */ |
| 191 | r = r100_cp_init(rdev, 1024 * 1024); | 190 | r = r100_cp_init(rdev, 1024 * 1024); |
| @@ -254,6 +253,9 @@ int r520_init(struct radeon_device *rdev) | |||
| 254 | RREG32(R_0007C0_CP_STAT)); | 253 | RREG32(R_0007C0_CP_STAT)); |
| 255 | } | 254 | } |
| 256 | /* check if cards are posted or not */ | 255 | /* check if cards are posted or not */ |
| 256 | if (radeon_boot_test_post_card(rdev) == false) | ||
| 257 | return -EINVAL; | ||
| 258 | |||
| 257 | if (!radeon_card_posted(rdev) && rdev->bios) { | 259 | if (!radeon_card_posted(rdev) && rdev->bios) { |
| 258 | DRM_INFO("GPU not posted. posting now...\n"); | 260 | DRM_INFO("GPU not posted. posting now...\n"); |
| 259 | atom_asic_init(rdev->mode_info.atom_context); | 261 | atom_asic_init(rdev->mode_info.atom_context); |
| @@ -277,7 +279,7 @@ int r520_init(struct radeon_device *rdev) | |||
| 277 | if (r) | 279 | if (r) |
| 278 | return r; | 280 | return r; |
| 279 | /* Memory manager */ | 281 | /* Memory manager */ |
| 280 | r = radeon_object_init(rdev); | 282 | r = radeon_bo_init(rdev); |
| 281 | if (r) | 283 | if (r) |
| 282 | return r; | 284 | return r; |
| 283 | r = rv370_pcie_gart_init(rdev); | 285 | r = rv370_pcie_gart_init(rdev); |
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c index 278f646bc18e..94e7fd2f59e9 100644 --- a/drivers/gpu/drm/radeon/r600.c +++ b/drivers/gpu/drm/radeon/r600.c | |||
| @@ -38,8 +38,10 @@ | |||
| 38 | 38 | ||
| 39 | #define PFP_UCODE_SIZE 576 | 39 | #define PFP_UCODE_SIZE 576 |
| 40 | #define PM4_UCODE_SIZE 1792 | 40 | #define PM4_UCODE_SIZE 1792 |
| 41 | #define RLC_UCODE_SIZE 768 | ||
| 41 | #define R700_PFP_UCODE_SIZE 848 | 42 | #define R700_PFP_UCODE_SIZE 848 |
| 42 | #define R700_PM4_UCODE_SIZE 1360 | 43 | #define R700_PM4_UCODE_SIZE 1360 |
| 44 | #define R700_RLC_UCODE_SIZE 1024 | ||
| 43 | 45 | ||
| 44 | /* Firmware Names */ | 46 | /* Firmware Names */ |
| 45 | MODULE_FIRMWARE("radeon/R600_pfp.bin"); | 47 | MODULE_FIRMWARE("radeon/R600_pfp.bin"); |
| @@ -62,6 +64,8 @@ MODULE_FIRMWARE("radeon/RV730_pfp.bin"); | |||
| 62 | MODULE_FIRMWARE("radeon/RV730_me.bin"); | 64 | MODULE_FIRMWARE("radeon/RV730_me.bin"); |
| 63 | MODULE_FIRMWARE("radeon/RV710_pfp.bin"); | 65 | MODULE_FIRMWARE("radeon/RV710_pfp.bin"); |
| 64 | MODULE_FIRMWARE("radeon/RV710_me.bin"); | 66 | MODULE_FIRMWARE("radeon/RV710_me.bin"); |
| 67 | MODULE_FIRMWARE("radeon/R600_rlc.bin"); | ||
| 68 | MODULE_FIRMWARE("radeon/R700_rlc.bin"); | ||
| 65 | 69 | ||
| 66 | int r600_debugfs_mc_info_init(struct radeon_device *rdev); | 70 | int r600_debugfs_mc_info_init(struct radeon_device *rdev); |
| 67 | 71 | ||
| @@ -180,7 +184,7 @@ int r600_pcie_gart_enable(struct radeon_device *rdev) | |||
| 180 | void r600_pcie_gart_disable(struct radeon_device *rdev) | 184 | void r600_pcie_gart_disable(struct radeon_device *rdev) |
| 181 | { | 185 | { |
| 182 | u32 tmp; | 186 | u32 tmp; |
| 183 | int i; | 187 | int i, r; |
| 184 | 188 | ||
| 185 | /* Disable all tables */ | 189 | /* Disable all tables */ |
| 186 | for (i = 0; i < 7; i++) | 190 | for (i = 0; i < 7; i++) |
| @@ -208,8 +212,12 @@ void r600_pcie_gart_disable(struct radeon_device *rdev) | |||
| 208 | WREG32(MC_VM_L1_TLB_MCB_RD_HDP_CNTL, tmp); | 212 | WREG32(MC_VM_L1_TLB_MCB_RD_HDP_CNTL, tmp); |
| 209 | WREG32(MC_VM_L1_TLB_MCB_WR_HDP_CNTL, tmp); | 213 | WREG32(MC_VM_L1_TLB_MCB_WR_HDP_CNTL, tmp); |
| 210 | if (rdev->gart.table.vram.robj) { | 214 | if (rdev->gart.table.vram.robj) { |
| 211 | radeon_object_kunmap(rdev->gart.table.vram.robj); | 215 | r = radeon_bo_reserve(rdev->gart.table.vram.robj, false); |
| 212 | radeon_object_unpin(rdev->gart.table.vram.robj); | 216 | if (likely(r == 0)) { |
| 217 | radeon_bo_kunmap(rdev->gart.table.vram.robj); | ||
| 218 | radeon_bo_unpin(rdev->gart.table.vram.robj); | ||
| 219 | radeon_bo_unreserve(rdev->gart.table.vram.robj); | ||
| 220 | } | ||
| 213 | } | 221 | } |
| 214 | } | 222 | } |
| 215 | 223 | ||
| @@ -1101,6 +1109,10 @@ void r600_pciep_wreg(struct radeon_device *rdev, u32 reg, u32 v) | |||
| 1101 | (void)RREG32(PCIE_PORT_DATA); | 1109 | (void)RREG32(PCIE_PORT_DATA); |
| 1102 | } | 1110 | } |
| 1103 | 1111 | ||
| 1112 | void r600_hdp_flush(struct radeon_device *rdev) | ||
| 1113 | { | ||
| 1114 | WREG32(R_005480_HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1); | ||
| 1115 | } | ||
| 1104 | 1116 | ||
| 1105 | /* | 1117 | /* |
| 1106 | * CP & Ring | 1118 | * CP & Ring |
| @@ -1110,11 +1122,12 @@ void r600_cp_stop(struct radeon_device *rdev) | |||
| 1110 | WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(1)); | 1122 | WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(1)); |
| 1111 | } | 1123 | } |
| 1112 | 1124 | ||
| 1113 | int r600_cp_init_microcode(struct radeon_device *rdev) | 1125 | int r600_init_microcode(struct radeon_device *rdev) |
| 1114 | { | 1126 | { |
| 1115 | struct platform_device *pdev; | 1127 | struct platform_device *pdev; |
| 1116 | const char *chip_name; | 1128 | const char *chip_name; |
| 1117 | size_t pfp_req_size, me_req_size; | 1129 | const char *rlc_chip_name; |
| 1130 | size_t pfp_req_size, me_req_size, rlc_req_size; | ||
| 1118 | char fw_name[30]; | 1131 | char fw_name[30]; |
| 1119 | int err; | 1132 | int err; |
| 1120 | 1133 | ||
| @@ -1128,30 +1141,62 @@ int r600_cp_init_microcode(struct radeon_device *rdev) | |||
| 1128 | } | 1141 | } |
| 1129 | 1142 | ||
| 1130 | switch (rdev->family) { | 1143 | switch (rdev->family) { |
| 1131 | case CHIP_R600: chip_name = "R600"; break; | 1144 | case CHIP_R600: |
| 1132 | case CHIP_RV610: chip_name = "RV610"; break; | 1145 | chip_name = "R600"; |
| 1133 | case CHIP_RV630: chip_name = "RV630"; break; | 1146 | rlc_chip_name = "R600"; |
| 1134 | case CHIP_RV620: chip_name = "RV620"; break; | 1147 | break; |
| 1135 | case CHIP_RV635: chip_name = "RV635"; break; | 1148 | case CHIP_RV610: |
| 1136 | case CHIP_RV670: chip_name = "RV670"; break; | 1149 | chip_name = "RV610"; |
| 1150 | rlc_chip_name = "R600"; | ||
| 1151 | break; | ||
| 1152 | case CHIP_RV630: | ||
| 1153 | chip_name = "RV630"; | ||
| 1154 | rlc_chip_name = "R600"; | ||
| 1155 | break; | ||
| 1156 | case CHIP_RV620: | ||
| 1157 | chip_name = "RV620"; | ||
| 1158 | rlc_chip_name = "R600"; | ||
| 1159 | break; | ||
| 1160 | case CHIP_RV635: | ||
| 1161 | chip_name = "RV635"; | ||
| 1162 | rlc_chip_name = "R600"; | ||
| 1163 | break; | ||
| 1164 | case CHIP_RV670: | ||
| 1165 | chip_name = "RV670"; | ||
| 1166 | rlc_chip_name = "R600"; | ||
| 1167 | break; | ||
| 1137 | case CHIP_RS780: | 1168 | case CHIP_RS780: |
| 1138 | case CHIP_RS880: chip_name = "RS780"; break; | 1169 | case CHIP_RS880: |
| 1139 | case CHIP_RV770: chip_name = "RV770"; break; | 1170 | chip_name = "RS780"; |
| 1171 | rlc_chip_name = "R600"; | ||
| 1172 | break; | ||
| 1173 | case CHIP_RV770: | ||
| 1174 | chip_name = "RV770"; | ||
| 1175 | rlc_chip_name = "R700"; | ||
| 1176 | break; | ||
| 1140 | case CHIP_RV730: | 1177 | case CHIP_RV730: |
| 1141 | case CHIP_RV740: chip_name = "RV730"; break; | 1178 | case CHIP_RV740: |
| 1142 | case CHIP_RV710: chip_name = "RV710"; break; | 1179 | chip_name = "RV730"; |
| 1180 | rlc_chip_name = "R700"; | ||
| 1181 | break; | ||
| 1182 | case CHIP_RV710: | ||
| 1183 | chip_name = "RV710"; | ||
| 1184 | rlc_chip_name = "R700"; | ||
| 1185 | break; | ||
| 1143 | default: BUG(); | 1186 | default: BUG(); |
| 1144 | } | 1187 | } |
| 1145 | 1188 | ||
| 1146 | if (rdev->family >= CHIP_RV770) { | 1189 | if (rdev->family >= CHIP_RV770) { |
| 1147 | pfp_req_size = R700_PFP_UCODE_SIZE * 4; | 1190 | pfp_req_size = R700_PFP_UCODE_SIZE * 4; |
| 1148 | me_req_size = R700_PM4_UCODE_SIZE * 4; | 1191 | me_req_size = R700_PM4_UCODE_SIZE * 4; |
| 1192 | rlc_req_size = R700_RLC_UCODE_SIZE * 4; | ||
| 1149 | } else { | 1193 | } else { |
| 1150 | pfp_req_size = PFP_UCODE_SIZE * 4; | 1194 | pfp_req_size = PFP_UCODE_SIZE * 4; |
| 1151 | me_req_size = PM4_UCODE_SIZE * 12; | 1195 | me_req_size = PM4_UCODE_SIZE * 12; |
| 1196 | rlc_req_size = RLC_UCODE_SIZE * 4; | ||
| 1152 | } | 1197 | } |
| 1153 | 1198 | ||
| 1154 | DRM_INFO("Loading %s CP Microcode\n", chip_name); | 1199 | DRM_INFO("Loading %s Microcode\n", chip_name); |
| 1155 | 1200 | ||
| 1156 | snprintf(fw_name, sizeof(fw_name), "radeon/%s_pfp.bin", chip_name); | 1201 | snprintf(fw_name, sizeof(fw_name), "radeon/%s_pfp.bin", chip_name); |
| 1157 | err = request_firmware(&rdev->pfp_fw, fw_name, &pdev->dev); | 1202 | err = request_firmware(&rdev->pfp_fw, fw_name, &pdev->dev); |
| @@ -1175,6 +1220,18 @@ int r600_cp_init_microcode(struct radeon_device *rdev) | |||
| 1175 | rdev->me_fw->size, fw_name); | 1220 | rdev->me_fw->size, fw_name); |
| 1176 | err = -EINVAL; | 1221 | err = -EINVAL; |
| 1177 | } | 1222 | } |
| 1223 | |||
| 1224 | snprintf(fw_name, sizeof(fw_name), "radeon/%s_rlc.bin", rlc_chip_name); | ||
| 1225 | err = request_firmware(&rdev->rlc_fw, fw_name, &pdev->dev); | ||
| 1226 | if (err) | ||
| 1227 | goto out; | ||
| 1228 | if (rdev->rlc_fw->size != rlc_req_size) { | ||
| 1229 | printk(KERN_ERR | ||
| 1230 | "r600_rlc: Bogus length %zu in firmware \"%s\"\n", | ||
| 1231 | rdev->rlc_fw->size, fw_name); | ||
| 1232 | err = -EINVAL; | ||
| 1233 | } | ||
| 1234 | |||
| 1178 | out: | 1235 | out: |
| 1179 | platform_device_unregister(pdev); | 1236 | platform_device_unregister(pdev); |
| 1180 | 1237 | ||
| @@ -1187,6 +1244,8 @@ out: | |||
| 1187 | rdev->pfp_fw = NULL; | 1244 | rdev->pfp_fw = NULL; |
| 1188 | release_firmware(rdev->me_fw); | 1245 | release_firmware(rdev->me_fw); |
| 1189 | rdev->me_fw = NULL; | 1246 | rdev->me_fw = NULL; |
| 1247 | release_firmware(rdev->rlc_fw); | ||
| 1248 | rdev->rlc_fw = NULL; | ||
| 1190 | } | 1249 | } |
| 1191 | return err; | 1250 | return err; |
| 1192 | } | 1251 | } |
| @@ -1381,10 +1440,16 @@ int r600_ring_test(struct radeon_device *rdev) | |||
| 1381 | 1440 | ||
| 1382 | void r600_wb_disable(struct radeon_device *rdev) | 1441 | void r600_wb_disable(struct radeon_device *rdev) |
| 1383 | { | 1442 | { |
| 1443 | int r; | ||
| 1444 | |||
| 1384 | WREG32(SCRATCH_UMSK, 0); | 1445 | WREG32(SCRATCH_UMSK, 0); |
| 1385 | if (rdev->wb.wb_obj) { | 1446 | if (rdev->wb.wb_obj) { |
| 1386 | radeon_object_kunmap(rdev->wb.wb_obj); | 1447 | r = radeon_bo_reserve(rdev->wb.wb_obj, false); |
| 1387 | radeon_object_unpin(rdev->wb.wb_obj); | 1448 | if (unlikely(r != 0)) |
| 1449 | return; | ||
| 1450 | radeon_bo_kunmap(rdev->wb.wb_obj); | ||
| 1451 | radeon_bo_unpin(rdev->wb.wb_obj); | ||
| 1452 | radeon_bo_unreserve(rdev->wb.wb_obj); | ||
| 1388 | } | 1453 | } |
| 1389 | } | 1454 | } |
| 1390 | 1455 | ||
| @@ -1392,7 +1457,7 @@ void r600_wb_fini(struct radeon_device *rdev) | |||
| 1392 | { | 1457 | { |
| 1393 | r600_wb_disable(rdev); | 1458 | r600_wb_disable(rdev); |
| 1394 | if (rdev->wb.wb_obj) { | 1459 | if (rdev->wb.wb_obj) { |
| 1395 | radeon_object_unref(&rdev->wb.wb_obj); | 1460 | radeon_bo_unref(&rdev->wb.wb_obj); |
| 1396 | rdev->wb.wb = NULL; | 1461 | rdev->wb.wb = NULL; |
| 1397 | rdev->wb.wb_obj = NULL; | 1462 | rdev->wb.wb_obj = NULL; |
| 1398 | } | 1463 | } |
| @@ -1403,22 +1468,29 @@ int r600_wb_enable(struct radeon_device *rdev) | |||
| 1403 | int r; | 1468 | int r; |
| 1404 | 1469 | ||
| 1405 | if (rdev->wb.wb_obj == NULL) { | 1470 | if (rdev->wb.wb_obj == NULL) { |
| 1406 | r = radeon_object_create(rdev, NULL, RADEON_GPU_PAGE_SIZE, true, | 1471 | r = radeon_bo_create(rdev, NULL, RADEON_GPU_PAGE_SIZE, true, |
| 1407 | RADEON_GEM_DOMAIN_GTT, false, &rdev->wb.wb_obj); | 1472 | RADEON_GEM_DOMAIN_GTT, &rdev->wb.wb_obj); |
| 1408 | if (r) { | 1473 | if (r) { |
| 1409 | dev_warn(rdev->dev, "failed to create WB buffer (%d).\n", r); | 1474 | dev_warn(rdev->dev, "(%d) create WB bo failed\n", r); |
| 1475 | return r; | ||
| 1476 | } | ||
| 1477 | r = radeon_bo_reserve(rdev->wb.wb_obj, false); | ||
| 1478 | if (unlikely(r != 0)) { | ||
| 1479 | r600_wb_fini(rdev); | ||
| 1410 | return r; | 1480 | return r; |
| 1411 | } | 1481 | } |
| 1412 | r = radeon_object_pin(rdev->wb.wb_obj, RADEON_GEM_DOMAIN_GTT, | 1482 | r = radeon_bo_pin(rdev->wb.wb_obj, RADEON_GEM_DOMAIN_GTT, |
| 1413 | &rdev->wb.gpu_addr); | 1483 | &rdev->wb.gpu_addr); |
| 1414 | if (r) { | 1484 | if (r) { |
| 1415 | dev_warn(rdev->dev, "failed to pin WB buffer (%d).\n", r); | 1485 | radeon_bo_unreserve(rdev->wb.wb_obj); |
| 1486 | dev_warn(rdev->dev, "(%d) pin WB bo failed\n", r); | ||
| 1416 | r600_wb_fini(rdev); | 1487 | r600_wb_fini(rdev); |
| 1417 | return r; | 1488 | return r; |
| 1418 | } | 1489 | } |
| 1419 | r = radeon_object_kmap(rdev->wb.wb_obj, (void **)&rdev->wb.wb); | 1490 | r = radeon_bo_kmap(rdev->wb.wb_obj, (void **)&rdev->wb.wb); |
| 1491 | radeon_bo_unreserve(rdev->wb.wb_obj); | ||
| 1420 | if (r) { | 1492 | if (r) { |
| 1421 | dev_warn(rdev->dev, "failed to map WB buffer (%d).\n", r); | 1493 | dev_warn(rdev->dev, "(%d) map WB bo failed\n", r); |
| 1422 | r600_wb_fini(rdev); | 1494 | r600_wb_fini(rdev); |
| 1423 | return r; | 1495 | return r; |
| 1424 | } | 1496 | } |
| @@ -1433,10 +1505,14 @@ int r600_wb_enable(struct radeon_device *rdev) | |||
| 1433 | void r600_fence_ring_emit(struct radeon_device *rdev, | 1505 | void r600_fence_ring_emit(struct radeon_device *rdev, |
| 1434 | struct radeon_fence *fence) | 1506 | struct radeon_fence *fence) |
| 1435 | { | 1507 | { |
| 1508 | /* Also consider EVENT_WRITE_EOP. it handles the interrupts + timestamps + events */ | ||
| 1436 | /* Emit fence sequence & fire IRQ */ | 1509 | /* Emit fence sequence & fire IRQ */ |
| 1437 | radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 1)); | 1510 | radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 1)); |
| 1438 | radeon_ring_write(rdev, ((rdev->fence_drv.scratch_reg - PACKET3_SET_CONFIG_REG_OFFSET) >> 2)); | 1511 | radeon_ring_write(rdev, ((rdev->fence_drv.scratch_reg - PACKET3_SET_CONFIG_REG_OFFSET) >> 2)); |
| 1439 | radeon_ring_write(rdev, fence->seq); | 1512 | radeon_ring_write(rdev, fence->seq); |
| 1513 | /* CP_INTERRUPT packet 3 no longer exists, use packet 0 */ | ||
| 1514 | radeon_ring_write(rdev, PACKET0(CP_INT_STATUS, 0)); | ||
| 1515 | radeon_ring_write(rdev, RB_INT_STAT); | ||
| 1440 | } | 1516 | } |
| 1441 | 1517 | ||
| 1442 | int r600_copy_dma(struct radeon_device *rdev, | 1518 | int r600_copy_dma(struct radeon_device *rdev, |
| @@ -1459,18 +1535,6 @@ int r600_copy_blit(struct radeon_device *rdev, | |||
| 1459 | return 0; | 1535 | return 0; |
| 1460 | } | 1536 | } |
| 1461 | 1537 | ||
| 1462 | int r600_irq_process(struct radeon_device *rdev) | ||
| 1463 | { | ||
| 1464 | /* FIXME: implement */ | ||
| 1465 | return 0; | ||
| 1466 | } | ||
| 1467 | |||
| 1468 | int r600_irq_set(struct radeon_device *rdev) | ||
| 1469 | { | ||
| 1470 | /* FIXME: implement */ | ||
| 1471 | return 0; | ||
| 1472 | } | ||
| 1473 | |||
| 1474 | int r600_set_surface_reg(struct radeon_device *rdev, int reg, | 1538 | int r600_set_surface_reg(struct radeon_device *rdev, int reg, |
| 1475 | uint32_t tiling_flags, uint32_t pitch, | 1539 | uint32_t tiling_flags, uint32_t pitch, |
| 1476 | uint32_t offset, uint32_t obj_size) | 1540 | uint32_t offset, uint32_t obj_size) |
| @@ -1516,12 +1580,25 @@ int r600_startup(struct radeon_device *rdev) | |||
| 1516 | } | 1580 | } |
| 1517 | r600_gpu_init(rdev); | 1581 | r600_gpu_init(rdev); |
| 1518 | 1582 | ||
| 1519 | r = radeon_object_pin(rdev->r600_blit.shader_obj, RADEON_GEM_DOMAIN_VRAM, | 1583 | r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false); |
| 1520 | &rdev->r600_blit.shader_gpu_addr); | 1584 | if (unlikely(r != 0)) |
| 1585 | return r; | ||
| 1586 | r = radeon_bo_pin(rdev->r600_blit.shader_obj, RADEON_GEM_DOMAIN_VRAM, | ||
| 1587 | &rdev->r600_blit.shader_gpu_addr); | ||
| 1588 | radeon_bo_unreserve(rdev->r600_blit.shader_obj); | ||
| 1589 | if (r) { | ||
| 1590 | dev_err(rdev->dev, "(%d) pin blit object failed\n", r); | ||
| 1591 | return r; | ||
| 1592 | } | ||
| 1593 | |||
| 1594 | /* Enable IRQ */ | ||
| 1595 | r = r600_irq_init(rdev); | ||
| 1521 | if (r) { | 1596 | if (r) { |
| 1522 | DRM_ERROR("failed to pin blit object %d\n", r); | 1597 | DRM_ERROR("radeon: IH init failed (%d).\n", r); |
| 1598 | radeon_irq_kms_fini(rdev); | ||
| 1523 | return r; | 1599 | return r; |
| 1524 | } | 1600 | } |
| 1601 | r600_irq_set(rdev); | ||
| 1525 | 1602 | ||
| 1526 | r = radeon_ring_init(rdev, rdev->cp.ring_size); | 1603 | r = radeon_ring_init(rdev, rdev->cp.ring_size); |
| 1527 | if (r) | 1604 | if (r) |
| @@ -1583,13 +1660,19 @@ int r600_resume(struct radeon_device *rdev) | |||
| 1583 | 1660 | ||
| 1584 | int r600_suspend(struct radeon_device *rdev) | 1661 | int r600_suspend(struct radeon_device *rdev) |
| 1585 | { | 1662 | { |
| 1663 | int r; | ||
| 1664 | |||
| 1586 | /* FIXME: we should wait for ring to be empty */ | 1665 | /* FIXME: we should wait for ring to be empty */ |
| 1587 | r600_cp_stop(rdev); | 1666 | r600_cp_stop(rdev); |
| 1588 | rdev->cp.ready = false; | 1667 | rdev->cp.ready = false; |
| 1589 | r600_wb_disable(rdev); | 1668 | r600_wb_disable(rdev); |
| 1590 | r600_pcie_gart_disable(rdev); | 1669 | r600_pcie_gart_disable(rdev); |
| 1591 | /* unpin shaders bo */ | 1670 | /* unpin shaders bo */ |
| 1592 | radeon_object_unpin(rdev->r600_blit.shader_obj); | 1671 | r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false); |
| 1672 | if (unlikely(r != 0)) | ||
| 1673 | return r; | ||
| 1674 | radeon_bo_unpin(rdev->r600_blit.shader_obj); | ||
| 1675 | radeon_bo_unreserve(rdev->r600_blit.shader_obj); | ||
| 1593 | return 0; | 1676 | return 0; |
| 1594 | } | 1677 | } |
| 1595 | 1678 | ||
| @@ -1627,7 +1710,11 @@ int r600_init(struct radeon_device *rdev) | |||
| 1627 | if (r) | 1710 | if (r) |
| 1628 | return r; | 1711 | return r; |
| 1629 | /* Post card if necessary */ | 1712 | /* Post card if necessary */ |
| 1630 | if (!r600_card_posted(rdev) && rdev->bios) { | 1713 | if (!r600_card_posted(rdev)) { |
| 1714 | if (!rdev->bios) { | ||
| 1715 | dev_err(rdev->dev, "Card not posted and no BIOS - ignoring\n"); | ||
| 1716 | return -EINVAL; | ||
| 1717 | } | ||
| 1631 | DRM_INFO("GPU not posted. posting now...\n"); | 1718 | DRM_INFO("GPU not posted. posting now...\n"); |
| 1632 | atom_asic_init(rdev->mode_info.atom_context); | 1719 | atom_asic_init(rdev->mode_info.atom_context); |
| 1633 | } | 1720 | } |
| @@ -1650,14 +1737,22 @@ int r600_init(struct radeon_device *rdev) | |||
| 1650 | if (r) | 1737 | if (r) |
| 1651 | return r; | 1738 | return r; |
| 1652 | /* Memory manager */ | 1739 | /* Memory manager */ |
| 1653 | r = radeon_object_init(rdev); | 1740 | r = radeon_bo_init(rdev); |
| 1741 | if (r) | ||
| 1742 | return r; | ||
| 1743 | |||
| 1744 | r = radeon_irq_kms_init(rdev); | ||
| 1654 | if (r) | 1745 | if (r) |
| 1655 | return r; | 1746 | return r; |
| 1747 | |||
| 1656 | rdev->cp.ring_obj = NULL; | 1748 | rdev->cp.ring_obj = NULL; |
| 1657 | r600_ring_init(rdev, 1024 * 1024); | 1749 | r600_ring_init(rdev, 1024 * 1024); |
| 1658 | 1750 | ||
| 1659 | if (!rdev->me_fw || !rdev->pfp_fw) { | 1751 | rdev->ih.ring_obj = NULL; |
| 1660 | r = r600_cp_init_microcode(rdev); | 1752 | r600_ih_ring_init(rdev, 64 * 1024); |
| 1753 | |||
| 1754 | if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) { | ||
| 1755 | r = r600_init_microcode(rdev); | ||
| 1661 | if (r) { | 1756 | if (r) { |
| 1662 | DRM_ERROR("Failed to load firmware!\n"); | 1757 | DRM_ERROR("Failed to load firmware!\n"); |
| 1663 | return r; | 1758 | return r; |
| @@ -1704,6 +1799,8 @@ void r600_fini(struct radeon_device *rdev) | |||
| 1704 | r600_suspend(rdev); | 1799 | r600_suspend(rdev); |
| 1705 | 1800 | ||
| 1706 | r600_blit_fini(rdev); | 1801 | r600_blit_fini(rdev); |
| 1802 | r600_irq_fini(rdev); | ||
| 1803 | radeon_irq_kms_fini(rdev); | ||
| 1707 | radeon_ring_fini(rdev); | 1804 | radeon_ring_fini(rdev); |
| 1708 | r600_wb_fini(rdev); | 1805 | r600_wb_fini(rdev); |
| 1709 | r600_pcie_gart_fini(rdev); | 1806 | r600_pcie_gart_fini(rdev); |
| @@ -1712,7 +1809,7 @@ void r600_fini(struct radeon_device *rdev) | |||
| 1712 | radeon_clocks_fini(rdev); | 1809 | radeon_clocks_fini(rdev); |
| 1713 | if (rdev->flags & RADEON_IS_AGP) | 1810 | if (rdev->flags & RADEON_IS_AGP) |
| 1714 | radeon_agp_fini(rdev); | 1811 | radeon_agp_fini(rdev); |
| 1715 | radeon_object_fini(rdev); | 1812 | radeon_bo_fini(rdev); |
| 1716 | radeon_atombios_fini(rdev); | 1813 | radeon_atombios_fini(rdev); |
| 1717 | kfree(rdev->bios); | 1814 | kfree(rdev->bios); |
| 1718 | rdev->bios = NULL; | 1815 | rdev->bios = NULL; |
| @@ -1798,8 +1895,461 @@ int r600_ib_test(struct radeon_device *rdev) | |||
| 1798 | return r; | 1895 | return r; |
| 1799 | } | 1896 | } |
| 1800 | 1897 | ||
| 1898 | /* | ||
| 1899 | * Interrupts | ||
| 1900 | * | ||
| 1901 | * Interrupts use a ring buffer on r6xx/r7xx hardware. It works pretty | ||
| 1902 | * the same as the CP ring buffer, but in reverse. Rather than the CPU | ||
| 1903 | * writing to the ring and the GPU consuming, the GPU writes to the ring | ||
| 1904 | * and host consumes. As the host irq handler processes interrupts, it | ||
| 1905 | * increments the rptr. When the rptr catches up with the wptr, all the | ||
| 1906 | * current interrupts have been processed. | ||
| 1907 | */ | ||
| 1908 | |||
| 1909 | void r600_ih_ring_init(struct radeon_device *rdev, unsigned ring_size) | ||
| 1910 | { | ||
| 1911 | u32 rb_bufsz; | ||
| 1801 | 1912 | ||
| 1913 | /* Align ring size */ | ||
| 1914 | rb_bufsz = drm_order(ring_size / 4); | ||
| 1915 | ring_size = (1 << rb_bufsz) * 4; | ||
| 1916 | rdev->ih.ring_size = ring_size; | ||
| 1917 | rdev->ih.align_mask = 4 - 1; | ||
| 1918 | } | ||
| 1802 | 1919 | ||
| 1920 | static int r600_ih_ring_alloc(struct radeon_device *rdev, unsigned ring_size) | ||
| 1921 | { | ||
| 1922 | int r; | ||
| 1923 | |||
| 1924 | rdev->ih.ring_size = ring_size; | ||
| 1925 | /* Allocate ring buffer */ | ||
| 1926 | if (rdev->ih.ring_obj == NULL) { | ||
| 1927 | r = radeon_bo_create(rdev, NULL, rdev->ih.ring_size, | ||
| 1928 | true, | ||
| 1929 | RADEON_GEM_DOMAIN_GTT, | ||
| 1930 | &rdev->ih.ring_obj); | ||
| 1931 | if (r) { | ||
| 1932 | DRM_ERROR("radeon: failed to create ih ring buffer (%d).\n", r); | ||
| 1933 | return r; | ||
| 1934 | } | ||
| 1935 | r = radeon_bo_reserve(rdev->ih.ring_obj, false); | ||
| 1936 | if (unlikely(r != 0)) | ||
| 1937 | return r; | ||
| 1938 | r = radeon_bo_pin(rdev->ih.ring_obj, | ||
| 1939 | RADEON_GEM_DOMAIN_GTT, | ||
| 1940 | &rdev->ih.gpu_addr); | ||
| 1941 | if (r) { | ||
| 1942 | radeon_bo_unreserve(rdev->ih.ring_obj); | ||
| 1943 | DRM_ERROR("radeon: failed to pin ih ring buffer (%d).\n", r); | ||
| 1944 | return r; | ||
| 1945 | } | ||
| 1946 | r = radeon_bo_kmap(rdev->ih.ring_obj, | ||
| 1947 | (void **)&rdev->ih.ring); | ||
| 1948 | radeon_bo_unreserve(rdev->ih.ring_obj); | ||
| 1949 | if (r) { | ||
| 1950 | DRM_ERROR("radeon: failed to map ih ring buffer (%d).\n", r); | ||
| 1951 | return r; | ||
| 1952 | } | ||
| 1953 | } | ||
| 1954 | rdev->ih.ptr_mask = (rdev->cp.ring_size / 4) - 1; | ||
| 1955 | rdev->ih.rptr = 0; | ||
| 1956 | |||
| 1957 | return 0; | ||
| 1958 | } | ||
| 1959 | |||
| 1960 | static void r600_ih_ring_fini(struct radeon_device *rdev) | ||
| 1961 | { | ||
| 1962 | int r; | ||
| 1963 | if (rdev->ih.ring_obj) { | ||
| 1964 | r = radeon_bo_reserve(rdev->ih.ring_obj, false); | ||
| 1965 | if (likely(r == 0)) { | ||
| 1966 | radeon_bo_kunmap(rdev->ih.ring_obj); | ||
| 1967 | radeon_bo_unpin(rdev->ih.ring_obj); | ||
| 1968 | radeon_bo_unreserve(rdev->ih.ring_obj); | ||
| 1969 | } | ||
| 1970 | radeon_bo_unref(&rdev->ih.ring_obj); | ||
| 1971 | rdev->ih.ring = NULL; | ||
| 1972 | rdev->ih.ring_obj = NULL; | ||
| 1973 | } | ||
| 1974 | } | ||
| 1975 | |||
| 1976 | static void r600_rlc_stop(struct radeon_device *rdev) | ||
| 1977 | { | ||
| 1978 | |||
| 1979 | if (rdev->family >= CHIP_RV770) { | ||
| 1980 | /* r7xx asics need to soft reset RLC before halting */ | ||
| 1981 | WREG32(SRBM_SOFT_RESET, SOFT_RESET_RLC); | ||
| 1982 | RREG32(SRBM_SOFT_RESET); | ||
| 1983 | udelay(15000); | ||
| 1984 | WREG32(SRBM_SOFT_RESET, 0); | ||
| 1985 | RREG32(SRBM_SOFT_RESET); | ||
| 1986 | } | ||
| 1987 | |||
| 1988 | WREG32(RLC_CNTL, 0); | ||
| 1989 | } | ||
| 1990 | |||
| 1991 | static void r600_rlc_start(struct radeon_device *rdev) | ||
| 1992 | { | ||
| 1993 | WREG32(RLC_CNTL, RLC_ENABLE); | ||
| 1994 | } | ||
| 1995 | |||
| 1996 | static int r600_rlc_init(struct radeon_device *rdev) | ||
| 1997 | { | ||
| 1998 | u32 i; | ||
| 1999 | const __be32 *fw_data; | ||
| 2000 | |||
| 2001 | if (!rdev->rlc_fw) | ||
| 2002 | return -EINVAL; | ||
| 2003 | |||
| 2004 | r600_rlc_stop(rdev); | ||
| 2005 | |||
| 2006 | WREG32(RLC_HB_BASE, 0); | ||
| 2007 | WREG32(RLC_HB_CNTL, 0); | ||
| 2008 | WREG32(RLC_HB_RPTR, 0); | ||
| 2009 | WREG32(RLC_HB_WPTR, 0); | ||
| 2010 | WREG32(RLC_HB_WPTR_LSB_ADDR, 0); | ||
| 2011 | WREG32(RLC_HB_WPTR_MSB_ADDR, 0); | ||
| 2012 | WREG32(RLC_MC_CNTL, 0); | ||
| 2013 | WREG32(RLC_UCODE_CNTL, 0); | ||
| 2014 | |||
| 2015 | fw_data = (const __be32 *)rdev->rlc_fw->data; | ||
| 2016 | if (rdev->family >= CHIP_RV770) { | ||
| 2017 | for (i = 0; i < R700_RLC_UCODE_SIZE; i++) { | ||
| 2018 | WREG32(RLC_UCODE_ADDR, i); | ||
| 2019 | WREG32(RLC_UCODE_DATA, be32_to_cpup(fw_data++)); | ||
| 2020 | } | ||
| 2021 | } else { | ||
| 2022 | for (i = 0; i < RLC_UCODE_SIZE; i++) { | ||
| 2023 | WREG32(RLC_UCODE_ADDR, i); | ||
| 2024 | WREG32(RLC_UCODE_DATA, be32_to_cpup(fw_data++)); | ||
| 2025 | } | ||
| 2026 | } | ||
| 2027 | WREG32(RLC_UCODE_ADDR, 0); | ||
| 2028 | |||
| 2029 | r600_rlc_start(rdev); | ||
| 2030 | |||
| 2031 | return 0; | ||
| 2032 | } | ||
| 2033 | |||
| 2034 | static void r600_enable_interrupts(struct radeon_device *rdev) | ||
| 2035 | { | ||
| 2036 | u32 ih_cntl = RREG32(IH_CNTL); | ||
| 2037 | u32 ih_rb_cntl = RREG32(IH_RB_CNTL); | ||
| 2038 | |||
| 2039 | ih_cntl |= ENABLE_INTR; | ||
| 2040 | ih_rb_cntl |= IH_RB_ENABLE; | ||
| 2041 | WREG32(IH_CNTL, ih_cntl); | ||
| 2042 | WREG32(IH_RB_CNTL, ih_rb_cntl); | ||
| 2043 | rdev->ih.enabled = true; | ||
| 2044 | } | ||
| 2045 | |||
| 2046 | static void r600_disable_interrupts(struct radeon_device *rdev) | ||
| 2047 | { | ||
| 2048 | u32 ih_rb_cntl = RREG32(IH_RB_CNTL); | ||
| 2049 | u32 ih_cntl = RREG32(IH_CNTL); | ||
| 2050 | |||
| 2051 | ih_rb_cntl &= ~IH_RB_ENABLE; | ||
| 2052 | ih_cntl &= ~ENABLE_INTR; | ||
| 2053 | WREG32(IH_RB_CNTL, ih_rb_cntl); | ||
| 2054 | WREG32(IH_CNTL, ih_cntl); | ||
| 2055 | /* set rptr, wptr to 0 */ | ||
| 2056 | WREG32(IH_RB_RPTR, 0); | ||
| 2057 | WREG32(IH_RB_WPTR, 0); | ||
| 2058 | rdev->ih.enabled = false; | ||
| 2059 | rdev->ih.wptr = 0; | ||
| 2060 | rdev->ih.rptr = 0; | ||
| 2061 | } | ||
| 2062 | |||
| 2063 | int r600_irq_init(struct radeon_device *rdev) | ||
| 2064 | { | ||
| 2065 | int ret = 0; | ||
| 2066 | int rb_bufsz; | ||
| 2067 | u32 interrupt_cntl, ih_cntl, ih_rb_cntl; | ||
| 2068 | |||
| 2069 | /* allocate ring */ | ||
| 2070 | ret = r600_ih_ring_alloc(rdev, rdev->ih.ring_size); | ||
| 2071 | if (ret) | ||
| 2072 | return ret; | ||
| 2073 | |||
| 2074 | /* disable irqs */ | ||
| 2075 | r600_disable_interrupts(rdev); | ||
| 2076 | |||
| 2077 | /* init rlc */ | ||
| 2078 | ret = r600_rlc_init(rdev); | ||
| 2079 | if (ret) { | ||
| 2080 | r600_ih_ring_fini(rdev); | ||
| 2081 | return ret; | ||
| 2082 | } | ||
| 2083 | |||
| 2084 | /* setup interrupt control */ | ||
| 2085 | /* set dummy read address to ring address */ | ||
| 2086 | WREG32(INTERRUPT_CNTL2, rdev->ih.gpu_addr >> 8); | ||
| 2087 | interrupt_cntl = RREG32(INTERRUPT_CNTL); | ||
| 2088 | /* IH_DUMMY_RD_OVERRIDE=0 - dummy read disabled with msi, enabled without msi | ||
| 2089 | * IH_DUMMY_RD_OVERRIDE=1 - dummy read controlled by IH_DUMMY_RD_EN | ||
| 2090 | */ | ||
| 2091 | interrupt_cntl &= ~IH_DUMMY_RD_OVERRIDE; | ||
| 2092 | /* IH_REQ_NONSNOOP_EN=1 if ring is in non-cacheable memory, e.g., vram */ | ||
| 2093 | interrupt_cntl &= ~IH_REQ_NONSNOOP_EN; | ||
| 2094 | WREG32(INTERRUPT_CNTL, interrupt_cntl); | ||
| 2095 | |||
| 2096 | WREG32(IH_RB_BASE, rdev->ih.gpu_addr >> 8); | ||
| 2097 | rb_bufsz = drm_order(rdev->ih.ring_size / 4); | ||
| 2098 | |||
| 2099 | ih_rb_cntl = (IH_WPTR_OVERFLOW_ENABLE | | ||
| 2100 | IH_WPTR_OVERFLOW_CLEAR | | ||
| 2101 | (rb_bufsz << 1)); | ||
| 2102 | /* WPTR writeback, not yet */ | ||
| 2103 | /*ih_rb_cntl |= IH_WPTR_WRITEBACK_ENABLE;*/ | ||
| 2104 | WREG32(IH_RB_WPTR_ADDR_LO, 0); | ||
| 2105 | WREG32(IH_RB_WPTR_ADDR_HI, 0); | ||
| 2106 | |||
| 2107 | WREG32(IH_RB_CNTL, ih_rb_cntl); | ||
| 2108 | |||
| 2109 | /* set rptr, wptr to 0 */ | ||
| 2110 | WREG32(IH_RB_RPTR, 0); | ||
| 2111 | WREG32(IH_RB_WPTR, 0); | ||
| 2112 | |||
| 2113 | /* Default settings for IH_CNTL (disabled at first) */ | ||
| 2114 | ih_cntl = MC_WRREQ_CREDIT(0x10) | MC_WR_CLEAN_CNT(0x10); | ||
| 2115 | /* RPTR_REARM only works if msi's are enabled */ | ||
| 2116 | if (rdev->msi_enabled) | ||
| 2117 | ih_cntl |= RPTR_REARM; | ||
| 2118 | |||
| 2119 | #ifdef __BIG_ENDIAN | ||
| 2120 | ih_cntl |= IH_MC_SWAP(IH_MC_SWAP_32BIT); | ||
| 2121 | #endif | ||
| 2122 | WREG32(IH_CNTL, ih_cntl); | ||
| 2123 | |||
| 2124 | /* force the active interrupt state to all disabled */ | ||
| 2125 | WREG32(CP_INT_CNTL, 0); | ||
| 2126 | WREG32(GRBM_INT_CNTL, 0); | ||
| 2127 | WREG32(DxMODE_INT_MASK, 0); | ||
| 2128 | |||
| 2129 | /* enable irqs */ | ||
| 2130 | r600_enable_interrupts(rdev); | ||
| 2131 | |||
| 2132 | return ret; | ||
| 2133 | } | ||
| 2134 | |||
| 2135 | void r600_irq_fini(struct radeon_device *rdev) | ||
| 2136 | { | ||
| 2137 | r600_disable_interrupts(rdev); | ||
| 2138 | r600_rlc_stop(rdev); | ||
| 2139 | r600_ih_ring_fini(rdev); | ||
| 2140 | } | ||
| 2141 | |||
| 2142 | int r600_irq_set(struct radeon_device *rdev) | ||
| 2143 | { | ||
| 2144 | uint32_t cp_int_cntl = CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE; | ||
| 2145 | uint32_t mode_int = 0; | ||
| 2146 | |||
| 2147 | /* don't enable anything if the ih is disabled */ | ||
| 2148 | if (!rdev->ih.enabled) | ||
| 2149 | return 0; | ||
| 2150 | |||
| 2151 | if (rdev->irq.sw_int) { | ||
| 2152 | DRM_DEBUG("r600_irq_set: sw int\n"); | ||
| 2153 | cp_int_cntl |= RB_INT_ENABLE; | ||
| 2154 | } | ||
| 2155 | if (rdev->irq.crtc_vblank_int[0]) { | ||
| 2156 | DRM_DEBUG("r600_irq_set: vblank 0\n"); | ||
| 2157 | mode_int |= D1MODE_VBLANK_INT_MASK; | ||
| 2158 | } | ||
| 2159 | if (rdev->irq.crtc_vblank_int[1]) { | ||
| 2160 | DRM_DEBUG("r600_irq_set: vblank 1\n"); | ||
| 2161 | mode_int |= D2MODE_VBLANK_INT_MASK; | ||
| 2162 | } | ||
| 2163 | |||
| 2164 | WREG32(CP_INT_CNTL, cp_int_cntl); | ||
| 2165 | WREG32(DxMODE_INT_MASK, mode_int); | ||
| 2166 | |||
| 2167 | return 0; | ||
| 2168 | } | ||
| 2169 | |||
| 2170 | static inline void r600_irq_ack(struct radeon_device *rdev, u32 disp_int) | ||
| 2171 | { | ||
| 2172 | |||
| 2173 | if (disp_int & LB_D1_VBLANK_INTERRUPT) | ||
| 2174 | WREG32(D1MODE_VBLANK_STATUS, DxMODE_VBLANK_ACK); | ||
| 2175 | if (disp_int & LB_D1_VLINE_INTERRUPT) | ||
| 2176 | WREG32(D1MODE_VLINE_STATUS, DxMODE_VLINE_ACK); | ||
| 2177 | if (disp_int & LB_D2_VBLANK_INTERRUPT) | ||
| 2178 | WREG32(D2MODE_VBLANK_STATUS, DxMODE_VBLANK_ACK); | ||
| 2179 | if (disp_int & LB_D2_VLINE_INTERRUPT) | ||
| 2180 | WREG32(D2MODE_VLINE_STATUS, DxMODE_VLINE_ACK); | ||
| 2181 | |||
| 2182 | } | ||
| 2183 | |||
| 2184 | void r600_irq_disable(struct radeon_device *rdev) | ||
| 2185 | { | ||
| 2186 | u32 disp_int; | ||
| 2187 | |||
| 2188 | r600_disable_interrupts(rdev); | ||
| 2189 | /* Wait and acknowledge irq */ | ||
| 2190 | mdelay(1); | ||
| 2191 | if (ASIC_IS_DCE3(rdev)) | ||
| 2192 | disp_int = RREG32(DCE3_DISP_INTERRUPT_STATUS); | ||
| 2193 | else | ||
| 2194 | disp_int = RREG32(DISP_INTERRUPT_STATUS); | ||
| 2195 | r600_irq_ack(rdev, disp_int); | ||
| 2196 | } | ||
| 2197 | |||
| 2198 | static inline u32 r600_get_ih_wptr(struct radeon_device *rdev) | ||
| 2199 | { | ||
| 2200 | u32 wptr, tmp; | ||
| 2201 | |||
| 2202 | /* XXX use writeback */ | ||
| 2203 | wptr = RREG32(IH_RB_WPTR); | ||
| 2204 | |||
| 2205 | if (wptr & RB_OVERFLOW) { | ||
| 2206 | WARN_ON(1); | ||
| 2207 | /* XXX deal with overflow */ | ||
| 2208 | DRM_ERROR("IH RB overflow\n"); | ||
| 2209 | tmp = RREG32(IH_RB_CNTL); | ||
| 2210 | tmp |= IH_WPTR_OVERFLOW_CLEAR; | ||
| 2211 | WREG32(IH_RB_CNTL, tmp); | ||
| 2212 | } | ||
| 2213 | wptr = wptr & WPTR_OFFSET_MASK; | ||
| 2214 | |||
| 2215 | return wptr; | ||
| 2216 | } | ||
| 2217 | |||
| 2218 | /* r600 IV Ring | ||
| 2219 | * Each IV ring entry is 128 bits: | ||
| 2220 | * [7:0] - interrupt source id | ||
| 2221 | * [31:8] - reserved | ||
| 2222 | * [59:32] - interrupt source data | ||
| 2223 | * [127:60] - reserved | ||
| 2224 | * | ||
| 2225 | * The basic interrupt vector entries | ||
| 2226 | * are decoded as follows: | ||
| 2227 | * src_id src_data description | ||
| 2228 | * 1 0 D1 Vblank | ||
| 2229 | * 1 1 D1 Vline | ||
| 2230 | * 5 0 D2 Vblank | ||
| 2231 | * 5 1 D2 Vline | ||
| 2232 | * 19 0 FP Hot plug detection A | ||
| 2233 | * 19 1 FP Hot plug detection B | ||
| 2234 | * 19 2 DAC A auto-detection | ||
| 2235 | * 19 3 DAC B auto-detection | ||
| 2236 | * 176 - CP_INT RB | ||
| 2237 | * 177 - CP_INT IB1 | ||
| 2238 | * 178 - CP_INT IB2 | ||
| 2239 | * 181 - EOP Interrupt | ||
| 2240 | * 233 - GUI Idle | ||
| 2241 | * | ||
| 2242 | * Note, these are based on r600 and may need to be | ||
| 2243 | * adjusted or added to on newer asics | ||
| 2244 | */ | ||
| 2245 | |||
| 2246 | int r600_irq_process(struct radeon_device *rdev) | ||
| 2247 | { | ||
| 2248 | u32 wptr = r600_get_ih_wptr(rdev); | ||
| 2249 | u32 rptr = rdev->ih.rptr; | ||
| 2250 | u32 src_id, src_data; | ||
| 2251 | u32 last_entry = rdev->ih.ring_size - 16; | ||
| 2252 | u32 ring_index, disp_int; | ||
| 2253 | unsigned long flags; | ||
| 2254 | |||
| 2255 | DRM_DEBUG("r600_irq_process start: rptr %d, wptr %d\n", rptr, wptr); | ||
| 2256 | |||
| 2257 | spin_lock_irqsave(&rdev->ih.lock, flags); | ||
| 2258 | |||
| 2259 | if (rptr == wptr) { | ||
| 2260 | spin_unlock_irqrestore(&rdev->ih.lock, flags); | ||
| 2261 | return IRQ_NONE; | ||
| 2262 | } | ||
| 2263 | if (rdev->shutdown) { | ||
| 2264 | spin_unlock_irqrestore(&rdev->ih.lock, flags); | ||
| 2265 | return IRQ_NONE; | ||
| 2266 | } | ||
| 2267 | |||
| 2268 | restart_ih: | ||
| 2269 | /* display interrupts */ | ||
| 2270 | if (ASIC_IS_DCE3(rdev)) | ||
| 2271 | disp_int = RREG32(DCE3_DISP_INTERRUPT_STATUS); | ||
| 2272 | else | ||
| 2273 | disp_int = RREG32(DISP_INTERRUPT_STATUS); | ||
| 2274 | r600_irq_ack(rdev, disp_int); | ||
| 2275 | |||
| 2276 | rdev->ih.wptr = wptr; | ||
| 2277 | while (rptr != wptr) { | ||
| 2278 | /* wptr/rptr are in bytes! */ | ||
| 2279 | ring_index = rptr / 4; | ||
| 2280 | src_id = rdev->ih.ring[ring_index] & 0xff; | ||
| 2281 | src_data = rdev->ih.ring[ring_index + 1] & 0xfffffff; | ||
| 2282 | |||
| 2283 | switch (src_id) { | ||
| 2284 | case 1: /* D1 vblank/vline */ | ||
| 2285 | switch (src_data) { | ||
| 2286 | case 0: /* D1 vblank */ | ||
| 2287 | if (disp_int & LB_D1_VBLANK_INTERRUPT) { | ||
| 2288 | drm_handle_vblank(rdev->ddev, 0); | ||
| 2289 | disp_int &= ~LB_D1_VBLANK_INTERRUPT; | ||
| 2290 | DRM_DEBUG("IH: D1 vblank\n"); | ||
| 2291 | } | ||
| 2292 | break; | ||
| 2293 | case 1: /* D1 vline */ | ||
| 2294 | if (disp_int & LB_D1_VLINE_INTERRUPT) { | ||
| 2295 | disp_int &= ~LB_D1_VLINE_INTERRUPT; | ||
| 2296 | DRM_DEBUG("IH: D1 vline\n"); | ||
| 2297 | } | ||
| 2298 | break; | ||
| 2299 | default: | ||
| 2300 | DRM_ERROR("Unhandled interrupt: %d %d\n", src_id, src_data); | ||
| 2301 | break; | ||
| 2302 | } | ||
| 2303 | break; | ||
| 2304 | case 5: /* D2 vblank/vline */ | ||
| 2305 | switch (src_data) { | ||
| 2306 | case 0: /* D2 vblank */ | ||
| 2307 | if (disp_int & LB_D2_VBLANK_INTERRUPT) { | ||
| 2308 | drm_handle_vblank(rdev->ddev, 1); | ||
| 2309 | disp_int &= ~LB_D2_VBLANK_INTERRUPT; | ||
| 2310 | DRM_DEBUG("IH: D2 vblank\n"); | ||
| 2311 | } | ||
| 2312 | break; | ||
| 2313 | case 1: /* D2 vline */ | ||
| 2314 | if (disp_int & LB_D2_VLINE_INTERRUPT) { | ||
| 2315 | disp_int &= ~LB_D2_VLINE_INTERRUPT; | ||
| 2316 | DRM_DEBUG("IH: D2 vline\n"); | ||
| 2317 | } | ||
| 2318 | break; | ||
| 2319 | default: | ||
| 2320 | DRM_ERROR("Unhandled interrupt: %d %d\n", src_id, src_data); | ||
| 2321 | break; | ||
| 2322 | } | ||
| 2323 | break; | ||
| 2324 | case 176: /* CP_INT in ring buffer */ | ||
| 2325 | case 177: /* CP_INT in IB1 */ | ||
| 2326 | case 178: /* CP_INT in IB2 */ | ||
| 2327 | DRM_DEBUG("IH: CP int: 0x%08x\n", src_data); | ||
| 2328 | radeon_fence_process(rdev); | ||
| 2329 | break; | ||
| 2330 | case 181: /* CP EOP event */ | ||
| 2331 | DRM_DEBUG("IH: CP EOP\n"); | ||
| 2332 | break; | ||
| 2333 | default: | ||
| 2334 | DRM_ERROR("Unhandled interrupt: %d %d\n", src_id, src_data); | ||
| 2335 | break; | ||
| 2336 | } | ||
| 2337 | |||
| 2338 | /* wptr/rptr are in bytes! */ | ||
| 2339 | if (rptr == last_entry) | ||
| 2340 | rptr = 0; | ||
| 2341 | else | ||
| 2342 | rptr += 16; | ||
| 2343 | } | ||
| 2344 | /* make sure wptr hasn't changed while processing */ | ||
| 2345 | wptr = r600_get_ih_wptr(rdev); | ||
| 2346 | if (wptr != rdev->ih.wptr) | ||
| 2347 | goto restart_ih; | ||
| 2348 | rdev->ih.rptr = rptr; | ||
| 2349 | WREG32(IH_RB_RPTR, rdev->ih.rptr); | ||
| 2350 | spin_unlock_irqrestore(&rdev->ih.lock, flags); | ||
| 2351 | return IRQ_HANDLED; | ||
| 2352 | } | ||
| 1803 | 2353 | ||
| 1804 | /* | 2354 | /* |
| 1805 | * Debugfs info | 2355 | * Debugfs info |
| @@ -1811,21 +2361,21 @@ static int r600_debugfs_cp_ring_info(struct seq_file *m, void *data) | |||
| 1811 | struct drm_info_node *node = (struct drm_info_node *) m->private; | 2361 | struct drm_info_node *node = (struct drm_info_node *) m->private; |
| 1812 | struct drm_device *dev = node->minor->dev; | 2362 | struct drm_device *dev = node->minor->dev; |
| 1813 | struct radeon_device *rdev = dev->dev_private; | 2363 | struct radeon_device *rdev = dev->dev_private; |
| 1814 | uint32_t rdp, wdp; | ||
| 1815 | unsigned count, i, j; | 2364 | unsigned count, i, j; |
| 1816 | 2365 | ||
| 1817 | radeon_ring_free_size(rdev); | 2366 | radeon_ring_free_size(rdev); |
| 1818 | rdp = RREG32(CP_RB_RPTR); | 2367 | count = (rdev->cp.ring_size / 4) - rdev->cp.ring_free_dw; |
| 1819 | wdp = RREG32(CP_RB_WPTR); | ||
| 1820 | count = (rdp + rdev->cp.ring_size - wdp) & rdev->cp.ptr_mask; | ||
| 1821 | seq_printf(m, "CP_STAT 0x%08x\n", RREG32(CP_STAT)); | 2368 | seq_printf(m, "CP_STAT 0x%08x\n", RREG32(CP_STAT)); |
| 1822 | seq_printf(m, "CP_RB_WPTR 0x%08x\n", wdp); | 2369 | seq_printf(m, "CP_RB_WPTR 0x%08x\n", RREG32(CP_RB_WPTR)); |
| 1823 | seq_printf(m, "CP_RB_RPTR 0x%08x\n", rdp); | 2370 | seq_printf(m, "CP_RB_RPTR 0x%08x\n", RREG32(CP_RB_RPTR)); |
| 2371 | seq_printf(m, "driver's copy of the CP_RB_WPTR 0x%08x\n", rdev->cp.wptr); | ||
| 2372 | seq_printf(m, "driver's copy of the CP_RB_RPTR 0x%08x\n", rdev->cp.rptr); | ||
| 1824 | seq_printf(m, "%u free dwords in ring\n", rdev->cp.ring_free_dw); | 2373 | seq_printf(m, "%u free dwords in ring\n", rdev->cp.ring_free_dw); |
| 1825 | seq_printf(m, "%u dwords in ring\n", count); | 2374 | seq_printf(m, "%u dwords in ring\n", count); |
| 2375 | i = rdev->cp.rptr; | ||
| 1826 | for (j = 0; j <= count; j++) { | 2376 | for (j = 0; j <= count; j++) { |
| 1827 | i = (rdp + j) & rdev->cp.ptr_mask; | ||
| 1828 | seq_printf(m, "r[%04d]=0x%08x\n", i, rdev->cp.ring[i]); | 2377 | seq_printf(m, "r[%04d]=0x%08x\n", i, rdev->cp.ring[i]); |
| 2378 | i = (i + 1) & rdev->cp.ptr_mask; | ||
| 1829 | } | 2379 | } |
| 1830 | return 0; | 2380 | return 0; |
| 1831 | } | 2381 | } |
diff --git a/drivers/gpu/drm/radeon/r600_blit_kms.c b/drivers/gpu/drm/radeon/r600_blit_kms.c index dbf716e1fbf3..9aecafb51b66 100644 --- a/drivers/gpu/drm/radeon/r600_blit_kms.c +++ b/drivers/gpu/drm/radeon/r600_blit_kms.c | |||
| @@ -473,9 +473,8 @@ int r600_blit_init(struct radeon_device *rdev) | |||
| 473 | obj_size += r6xx_ps_size * 4; | 473 | obj_size += r6xx_ps_size * 4; |
| 474 | obj_size = ALIGN(obj_size, 256); | 474 | obj_size = ALIGN(obj_size, 256); |
| 475 | 475 | ||
| 476 | r = radeon_object_create(rdev, NULL, obj_size, | 476 | r = radeon_bo_create(rdev, NULL, obj_size, true, RADEON_GEM_DOMAIN_VRAM, |
| 477 | true, RADEON_GEM_DOMAIN_VRAM, | 477 | &rdev->r600_blit.shader_obj); |
| 478 | false, &rdev->r600_blit.shader_obj); | ||
| 479 | if (r) { | 478 | if (r) { |
| 480 | DRM_ERROR("r600 failed to allocate shader\n"); | 479 | DRM_ERROR("r600 failed to allocate shader\n"); |
| 481 | return r; | 480 | return r; |
| @@ -485,12 +484,14 @@ int r600_blit_init(struct radeon_device *rdev) | |||
| 485 | obj_size, | 484 | obj_size, |
| 486 | rdev->r600_blit.vs_offset, rdev->r600_blit.ps_offset); | 485 | rdev->r600_blit.vs_offset, rdev->r600_blit.ps_offset); |
| 487 | 486 | ||
| 488 | r = radeon_object_kmap(rdev->r600_blit.shader_obj, &ptr); | 487 | r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false); |
| 488 | if (unlikely(r != 0)) | ||
| 489 | return r; | ||
| 490 | r = radeon_bo_kmap(rdev->r600_blit.shader_obj, &ptr); | ||
| 489 | if (r) { | 491 | if (r) { |
| 490 | DRM_ERROR("failed to map blit object %d\n", r); | 492 | DRM_ERROR("failed to map blit object %d\n", r); |
| 491 | return r; | 493 | return r; |
| 492 | } | 494 | } |
| 493 | |||
| 494 | if (rdev->family >= CHIP_RV770) | 495 | if (rdev->family >= CHIP_RV770) |
| 495 | memcpy_toio(ptr + rdev->r600_blit.state_offset, | 496 | memcpy_toio(ptr + rdev->r600_blit.state_offset, |
| 496 | r7xx_default_state, rdev->r600_blit.state_len * 4); | 497 | r7xx_default_state, rdev->r600_blit.state_len * 4); |
| @@ -500,19 +501,26 @@ int r600_blit_init(struct radeon_device *rdev) | |||
| 500 | if (num_packet2s) | 501 | if (num_packet2s) |
| 501 | memcpy_toio(ptr + rdev->r600_blit.state_offset + (rdev->r600_blit.state_len * 4), | 502 | memcpy_toio(ptr + rdev->r600_blit.state_offset + (rdev->r600_blit.state_len * 4), |
| 502 | packet2s, num_packet2s * 4); | 503 | packet2s, num_packet2s * 4); |
| 503 | |||
| 504 | |||
| 505 | memcpy(ptr + rdev->r600_blit.vs_offset, r6xx_vs, r6xx_vs_size * 4); | 504 | memcpy(ptr + rdev->r600_blit.vs_offset, r6xx_vs, r6xx_vs_size * 4); |
| 506 | memcpy(ptr + rdev->r600_blit.ps_offset, r6xx_ps, r6xx_ps_size * 4); | 505 | memcpy(ptr + rdev->r600_blit.ps_offset, r6xx_ps, r6xx_ps_size * 4); |
| 507 | 506 | radeon_bo_kunmap(rdev->r600_blit.shader_obj); | |
| 508 | radeon_object_kunmap(rdev->r600_blit.shader_obj); | 507 | radeon_bo_unreserve(rdev->r600_blit.shader_obj); |
| 509 | return 0; | 508 | return 0; |
| 510 | } | 509 | } |
| 511 | 510 | ||
| 512 | void r600_blit_fini(struct radeon_device *rdev) | 511 | void r600_blit_fini(struct radeon_device *rdev) |
| 513 | { | 512 | { |
| 514 | radeon_object_unpin(rdev->r600_blit.shader_obj); | 513 | int r; |
| 515 | radeon_object_unref(&rdev->r600_blit.shader_obj); | 514 | |
| 515 | r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false); | ||
| 516 | if (unlikely(r != 0)) { | ||
| 517 | dev_err(rdev->dev, "(%d) can't finish r600 blit\n", r); | ||
| 518 | goto out_unref; | ||
| 519 | } | ||
| 520 | radeon_bo_unpin(rdev->r600_blit.shader_obj); | ||
| 521 | radeon_bo_unreserve(rdev->r600_blit.shader_obj); | ||
| 522 | out_unref: | ||
| 523 | radeon_bo_unref(&rdev->r600_blit.shader_obj); | ||
| 516 | } | 524 | } |
| 517 | 525 | ||
| 518 | int r600_vb_ib_get(struct radeon_device *rdev) | 526 | int r600_vb_ib_get(struct radeon_device *rdev) |
| @@ -569,9 +577,9 @@ int r600_blit_prepare_copy(struct radeon_device *rdev, int size_bytes) | |||
| 569 | ring_size = num_loops * dwords_per_loop; | 577 | ring_size = num_loops * dwords_per_loop; |
| 570 | /* set default + shaders */ | 578 | /* set default + shaders */ |
| 571 | ring_size += 40; /* shaders + def state */ | 579 | ring_size += 40; /* shaders + def state */ |
| 572 | ring_size += 3; /* fence emit for VB IB */ | 580 | ring_size += 5; /* fence emit for VB IB */ |
| 573 | ring_size += 5; /* done copy */ | 581 | ring_size += 5; /* done copy */ |
| 574 | ring_size += 3; /* fence emit for done copy */ | 582 | ring_size += 5; /* fence emit for done copy */ |
| 575 | r = radeon_ring_lock(rdev, ring_size); | 583 | r = radeon_ring_lock(rdev, ring_size); |
| 576 | WARN_ON(r); | 584 | WARN_ON(r); |
| 577 | 585 | ||
diff --git a/drivers/gpu/drm/radeon/r600d.h b/drivers/gpu/drm/radeon/r600d.h index 27ab428b149b..61ccde5637d7 100644 --- a/drivers/gpu/drm/radeon/r600d.h +++ b/drivers/gpu/drm/radeon/r600d.h | |||
| @@ -456,7 +456,163 @@ | |||
| 456 | #define WAIT_2D_IDLECLEAN_bit (1 << 16) | 456 | #define WAIT_2D_IDLECLEAN_bit (1 << 16) |
| 457 | #define WAIT_3D_IDLECLEAN_bit (1 << 17) | 457 | #define WAIT_3D_IDLECLEAN_bit (1 << 17) |
| 458 | 458 | ||
| 459 | 459 | #define IH_RB_CNTL 0x3e00 | |
| 460 | # define IH_RB_ENABLE (1 << 0) | ||
| 461 | # define IH_IB_SIZE(x) ((x) << 1) /* log2 */ | ||
| 462 | # define IH_RB_FULL_DRAIN_ENABLE (1 << 6) | ||
| 463 | # define IH_WPTR_WRITEBACK_ENABLE (1 << 8) | ||
| 464 | # define IH_WPTR_WRITEBACK_TIMER(x) ((x) << 9) /* log2 */ | ||
| 465 | # define IH_WPTR_OVERFLOW_ENABLE (1 << 16) | ||
| 466 | # define IH_WPTR_OVERFLOW_CLEAR (1 << 31) | ||
| 467 | #define IH_RB_BASE 0x3e04 | ||
| 468 | #define IH_RB_RPTR 0x3e08 | ||
| 469 | #define IH_RB_WPTR 0x3e0c | ||
| 470 | # define RB_OVERFLOW (1 << 0) | ||
| 471 | # define WPTR_OFFSET_MASK 0x3fffc | ||
| 472 | #define IH_RB_WPTR_ADDR_HI 0x3e10 | ||
| 473 | #define IH_RB_WPTR_ADDR_LO 0x3e14 | ||
| 474 | #define IH_CNTL 0x3e18 | ||
| 475 | # define ENABLE_INTR (1 << 0) | ||
| 476 | # define IH_MC_SWAP(x) ((x) << 2) | ||
| 477 | # define IH_MC_SWAP_NONE 0 | ||
| 478 | # define IH_MC_SWAP_16BIT 1 | ||
| 479 | # define IH_MC_SWAP_32BIT 2 | ||
| 480 | # define IH_MC_SWAP_64BIT 3 | ||
| 481 | # define RPTR_REARM (1 << 4) | ||
| 482 | # define MC_WRREQ_CREDIT(x) ((x) << 15) | ||
| 483 | # define MC_WR_CLEAN_CNT(x) ((x) << 20) | ||
| 484 | |||
| 485 | #define RLC_CNTL 0x3f00 | ||
| 486 | # define RLC_ENABLE (1 << 0) | ||
| 487 | #define RLC_HB_BASE 0x3f10 | ||
| 488 | #define RLC_HB_CNTL 0x3f0c | ||
| 489 | #define RLC_HB_RPTR 0x3f20 | ||
| 490 | #define RLC_HB_WPTR 0x3f1c | ||
| 491 | #define RLC_HB_WPTR_LSB_ADDR 0x3f14 | ||
| 492 | #define RLC_HB_WPTR_MSB_ADDR 0x3f18 | ||
| 493 | #define RLC_MC_CNTL 0x3f44 | ||
| 494 | #define RLC_UCODE_CNTL 0x3f48 | ||
| 495 | #define RLC_UCODE_ADDR 0x3f2c | ||
| 496 | #define RLC_UCODE_DATA 0x3f30 | ||
| 497 | |||
| 498 | #define SRBM_SOFT_RESET 0xe60 | ||
| 499 | # define SOFT_RESET_RLC (1 << 13) | ||
| 500 | |||
| 501 | #define CP_INT_CNTL 0xc124 | ||
| 502 | # define CNTX_BUSY_INT_ENABLE (1 << 19) | ||
| 503 | # define CNTX_EMPTY_INT_ENABLE (1 << 20) | ||
| 504 | # define SCRATCH_INT_ENABLE (1 << 25) | ||
| 505 | # define TIME_STAMP_INT_ENABLE (1 << 26) | ||
| 506 | # define IB2_INT_ENABLE (1 << 29) | ||
| 507 | # define IB1_INT_ENABLE (1 << 30) | ||
| 508 | # define RB_INT_ENABLE (1 << 31) | ||
| 509 | #define CP_INT_STATUS 0xc128 | ||
| 510 | # define SCRATCH_INT_STAT (1 << 25) | ||
| 511 | # define TIME_STAMP_INT_STAT (1 << 26) | ||
| 512 | # define IB2_INT_STAT (1 << 29) | ||
| 513 | # define IB1_INT_STAT (1 << 30) | ||
| 514 | # define RB_INT_STAT (1 << 31) | ||
| 515 | |||
| 516 | #define GRBM_INT_CNTL 0x8060 | ||
| 517 | # define RDERR_INT_ENABLE (1 << 0) | ||
| 518 | # define WAIT_COUNT_TIMEOUT_INT_ENABLE (1 << 1) | ||
| 519 | # define GUI_IDLE_INT_ENABLE (1 << 19) | ||
| 520 | |||
| 521 | #define INTERRUPT_CNTL 0x5468 | ||
| 522 | # define IH_DUMMY_RD_OVERRIDE (1 << 0) | ||
| 523 | # define IH_DUMMY_RD_EN (1 << 1) | ||
| 524 | # define IH_REQ_NONSNOOP_EN (1 << 3) | ||
| 525 | # define GEN_IH_INT_EN (1 << 8) | ||
| 526 | #define INTERRUPT_CNTL2 0x546c | ||
| 527 | |||
| 528 | #define D1MODE_VBLANK_STATUS 0x6534 | ||
| 529 | #define D2MODE_VBLANK_STATUS 0x6d34 | ||
| 530 | # define DxMODE_VBLANK_OCCURRED (1 << 0) | ||
| 531 | # define DxMODE_VBLANK_ACK (1 << 4) | ||
| 532 | # define DxMODE_VBLANK_STAT (1 << 12) | ||
| 533 | # define DxMODE_VBLANK_INTERRUPT (1 << 16) | ||
| 534 | # define DxMODE_VBLANK_INTERRUPT_TYPE (1 << 17) | ||
| 535 | #define D1MODE_VLINE_STATUS 0x653c | ||
| 536 | #define D2MODE_VLINE_STATUS 0x6d3c | ||
| 537 | # define DxMODE_VLINE_OCCURRED (1 << 0) | ||
| 538 | # define DxMODE_VLINE_ACK (1 << 4) | ||
| 539 | # define DxMODE_VLINE_STAT (1 << 12) | ||
| 540 | # define DxMODE_VLINE_INTERRUPT (1 << 16) | ||
| 541 | # define DxMODE_VLINE_INTERRUPT_TYPE (1 << 17) | ||
| 542 | #define DxMODE_INT_MASK 0x6540 | ||
| 543 | # define D1MODE_VBLANK_INT_MASK (1 << 0) | ||
| 544 | # define D1MODE_VLINE_INT_MASK (1 << 4) | ||
| 545 | # define D2MODE_VBLANK_INT_MASK (1 << 8) | ||
| 546 | # define D2MODE_VLINE_INT_MASK (1 << 12) | ||
| 547 | #define DCE3_DISP_INTERRUPT_STATUS 0x7ddc | ||
| 548 | # define DC_HPD1_INTERRUPT (1 << 18) | ||
| 549 | # define DC_HPD2_INTERRUPT (1 << 19) | ||
| 550 | #define DISP_INTERRUPT_STATUS 0x7edc | ||
| 551 | # define LB_D1_VLINE_INTERRUPT (1 << 2) | ||
| 552 | # define LB_D2_VLINE_INTERRUPT (1 << 3) | ||
| 553 | # define LB_D1_VBLANK_INTERRUPT (1 << 4) | ||
| 554 | # define LB_D2_VBLANK_INTERRUPT (1 << 5) | ||
| 555 | # define DACA_AUTODETECT_INTERRUPT (1 << 16) | ||
| 556 | # define DACB_AUTODETECT_INTERRUPT (1 << 17) | ||
| 557 | # define DC_HOT_PLUG_DETECT1_INTERRUPT (1 << 18) | ||
| 558 | # define DC_HOT_PLUG_DETECT2_INTERRUPT (1 << 19) | ||
| 559 | # define DC_I2C_SW_DONE_INTERRUPT (1 << 20) | ||
| 560 | # define DC_I2C_HW_DONE_INTERRUPT (1 << 21) | ||
| 561 | #define DCE3_DISP_INTERRUPT_STATUS_CONTINUE 0x7de8 | ||
| 562 | # define DC_HPD4_INTERRUPT (1 << 14) | ||
| 563 | # define DC_HPD4_RX_INTERRUPT (1 << 15) | ||
| 564 | # define DC_HPD3_INTERRUPT (1 << 28) | ||
| 565 | # define DC_HPD1_RX_INTERRUPT (1 << 29) | ||
| 566 | # define DC_HPD2_RX_INTERRUPT (1 << 30) | ||
| 567 | #define DCE3_DISP_INTERRUPT_STATUS_CONTINUE2 0x7dec | ||
| 568 | # define DC_HPD3_RX_INTERRUPT (1 << 0) | ||
| 569 | # define DIGA_DP_VID_STREAM_DISABLE_INTERRUPT (1 << 1) | ||
| 570 | # define DIGA_DP_STEER_FIFO_OVERFLOW_INTERRUPT (1 << 2) | ||
| 571 | # define DIGB_DP_VID_STREAM_DISABLE_INTERRUPT (1 << 3) | ||
| 572 | # define DIGB_DP_STEER_FIFO_OVERFLOW_INTERRUPT (1 << 4) | ||
| 573 | # define AUX1_SW_DONE_INTERRUPT (1 << 5) | ||
| 574 | # define AUX1_LS_DONE_INTERRUPT (1 << 6) | ||
| 575 | # define AUX2_SW_DONE_INTERRUPT (1 << 7) | ||
| 576 | # define AUX2_LS_DONE_INTERRUPT (1 << 8) | ||
| 577 | # define AUX3_SW_DONE_INTERRUPT (1 << 9) | ||
| 578 | # define AUX3_LS_DONE_INTERRUPT (1 << 10) | ||
| 579 | # define AUX4_SW_DONE_INTERRUPT (1 << 11) | ||
| 580 | # define AUX4_LS_DONE_INTERRUPT (1 << 12) | ||
| 581 | # define DIGA_DP_FAST_TRAINING_COMPLETE_INTERRUPT (1 << 13) | ||
| 582 | # define DIGB_DP_FAST_TRAINING_COMPLETE_INTERRUPT (1 << 14) | ||
| 583 | /* DCE 3.2 */ | ||
| 584 | # define AUX5_SW_DONE_INTERRUPT (1 << 15) | ||
| 585 | # define AUX5_LS_DONE_INTERRUPT (1 << 16) | ||
| 586 | # define AUX6_SW_DONE_INTERRUPT (1 << 17) | ||
| 587 | # define AUX6_LS_DONE_INTERRUPT (1 << 18) | ||
| 588 | # define DC_HPD5_INTERRUPT (1 << 19) | ||
| 589 | # define DC_HPD5_RX_INTERRUPT (1 << 20) | ||
| 590 | # define DC_HPD6_INTERRUPT (1 << 21) | ||
| 591 | # define DC_HPD6_RX_INTERRUPT (1 << 22) | ||
| 592 | |||
| 593 | #define DCE3_DACA_AUTODETECT_INT_CONTROL 0x7038 | ||
| 594 | #define DCE3_DACB_AUTODETECT_INT_CONTROL 0x7138 | ||
| 595 | #define DACA_AUTODETECT_INT_CONTROL 0x7838 | ||
| 596 | #define DACB_AUTODETECT_INT_CONTROL 0x7a38 | ||
| 597 | # define DACx_AUTODETECT_ACK (1 << 0) | ||
| 598 | # define DACx_AUTODETECT_INT_ENABLE (1 << 16) | ||
| 599 | |||
| 600 | #define DC_HOT_PLUG_DETECT1_INT_CONTROL 0x7d08 | ||
| 601 | #define DC_HOT_PLUG_DETECT2_INT_CONTROL 0x7d18 | ||
| 602 | #define DC_HOT_PLUG_DETECT3_INT_CONTROL 0x7d2c | ||
| 603 | # define DC_HOT_PLUG_DETECTx_INT_ACK (1 << 0) | ||
| 604 | # define DC_HOT_PLUG_DETECTx_INT_POLARITY (1 << 8) | ||
| 605 | # define DC_HOT_PLUG_DETECTx_INT_EN (1 << 16) | ||
| 606 | /* DCE 3.2 */ | ||
| 607 | #define DC_HPD1_INT_CONTROL 0x7d04 | ||
| 608 | #define DC_HPD2_INT_CONTROL 0x7d10 | ||
| 609 | #define DC_HPD3_INT_CONTROL 0x7d1c | ||
| 610 | #define DC_HPD4_INT_CONTROL 0x7d28 | ||
| 611 | # define DC_HPDx_INT_ACK (1 << 0) | ||
| 612 | # define DC_HPDx_INT_POLARITY (1 << 8) | ||
| 613 | # define DC_HPDx_INT_EN (1 << 16) | ||
| 614 | # define DC_HPDx_RX_INT_ACK (1 << 20) | ||
| 615 | # define DC_HPDx_RX_INT_EN (1 << 24) | ||
| 460 | 616 | ||
| 461 | /* | 617 | /* |
| 462 | * PM4 | 618 | * PM4 |
| @@ -500,7 +656,6 @@ | |||
| 500 | #define PACKET3_WAIT_REG_MEM 0x3C | 656 | #define PACKET3_WAIT_REG_MEM 0x3C |
| 501 | #define PACKET3_MEM_WRITE 0x3D | 657 | #define PACKET3_MEM_WRITE 0x3D |
| 502 | #define PACKET3_INDIRECT_BUFFER 0x32 | 658 | #define PACKET3_INDIRECT_BUFFER 0x32 |
| 503 | #define PACKET3_CP_INTERRUPT 0x40 | ||
| 504 | #define PACKET3_SURFACE_SYNC 0x43 | 659 | #define PACKET3_SURFACE_SYNC 0x43 |
| 505 | # define PACKET3_CB0_DEST_BASE_ENA (1 << 6) | 660 | # define PACKET3_CB0_DEST_BASE_ENA (1 << 6) |
| 506 | # define PACKET3_TC_ACTION_ENA (1 << 23) | 661 | # define PACKET3_TC_ACTION_ENA (1 << 23) |
| @@ -674,4 +829,5 @@ | |||
| 674 | #define S_000E60_SOFT_RESET_TSC(x) (((x) & 1) << 16) | 829 | #define S_000E60_SOFT_RESET_TSC(x) (((x) & 1) << 16) |
| 675 | #define S_000E60_SOFT_RESET_VMC(x) (((x) & 1) << 17) | 830 | #define S_000E60_SOFT_RESET_VMC(x) (((x) & 1) << 17) |
| 676 | 831 | ||
| 832 | #define R_005480_HDP_MEM_COHERENCY_FLUSH_CNTL 0x5480 | ||
| 677 | #endif | 833 | #endif |
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h index 224506a2f7b1..f3deb4982b2d 100644 --- a/drivers/gpu/drm/radeon/radeon.h +++ b/drivers/gpu/drm/radeon/radeon.h | |||
| @@ -28,8 +28,6 @@ | |||
| 28 | #ifndef __RADEON_H__ | 28 | #ifndef __RADEON_H__ |
| 29 | #define __RADEON_H__ | 29 | #define __RADEON_H__ |
| 30 | 30 | ||
| 31 | #include "radeon_object.h" | ||
| 32 | |||
| 33 | /* TODO: Here are things that needs to be done : | 31 | /* TODO: Here are things that needs to be done : |
| 34 | * - surface allocator & initializer : (bit like scratch reg) should | 32 | * - surface allocator & initializer : (bit like scratch reg) should |
| 35 | * initialize HDP_ stuff on RS600, R600, R700 hw, well anythings | 33 | * initialize HDP_ stuff on RS600, R600, R700 hw, well anythings |
| @@ -67,6 +65,11 @@ | |||
| 67 | #include <linux/list.h> | 65 | #include <linux/list.h> |
| 68 | #include <linux/kref.h> | 66 | #include <linux/kref.h> |
| 69 | 67 | ||
| 68 | #include <ttm/ttm_bo_api.h> | ||
| 69 | #include <ttm/ttm_bo_driver.h> | ||
| 70 | #include <ttm/ttm_placement.h> | ||
| 71 | #include <ttm/ttm_module.h> | ||
| 72 | |||
| 70 | #include "radeon_family.h" | 73 | #include "radeon_family.h" |
| 71 | #include "radeon_mode.h" | 74 | #include "radeon_mode.h" |
| 72 | #include "radeon_reg.h" | 75 | #include "radeon_reg.h" |
| @@ -186,76 +189,60 @@ void radeon_fence_unref(struct radeon_fence **fence); | |||
| 186 | * Tiling registers | 189 | * Tiling registers |
| 187 | */ | 190 | */ |
| 188 | struct radeon_surface_reg { | 191 | struct radeon_surface_reg { |
| 189 | struct radeon_object *robj; | 192 | struct radeon_bo *bo; |
| 190 | }; | 193 | }; |
| 191 | 194 | ||
| 192 | #define RADEON_GEM_MAX_SURFACES 8 | 195 | #define RADEON_GEM_MAX_SURFACES 8 |
| 193 | 196 | ||
| 194 | /* | 197 | /* |
| 195 | * Radeon buffer. | 198 | * TTM. |
| 196 | */ | 199 | */ |
| 197 | struct radeon_object; | 200 | struct radeon_mman { |
| 201 | struct ttm_bo_global_ref bo_global_ref; | ||
| 202 | struct ttm_global_reference mem_global_ref; | ||
| 203 | bool mem_global_referenced; | ||
| 204 | struct ttm_bo_device bdev; | ||
| 205 | }; | ||
| 206 | |||
| 207 | struct radeon_bo { | ||
| 208 | /* Protected by gem.mutex */ | ||
| 209 | struct list_head list; | ||
| 210 | /* Protected by tbo.reserved */ | ||
| 211 | struct ttm_buffer_object tbo; | ||
| 212 | struct ttm_bo_kmap_obj kmap; | ||
| 213 | unsigned pin_count; | ||
| 214 | void *kptr; | ||
| 215 | u32 tiling_flags; | ||
| 216 | u32 pitch; | ||
| 217 | int surface_reg; | ||
| 218 | /* Constant after initialization */ | ||
| 219 | struct radeon_device *rdev; | ||
| 220 | struct drm_gem_object *gobj; | ||
| 221 | }; | ||
| 198 | 222 | ||
| 199 | struct radeon_object_list { | 223 | struct radeon_bo_list { |
| 200 | struct list_head list; | 224 | struct list_head list; |
| 201 | struct radeon_object *robj; | 225 | struct radeon_bo *bo; |
| 202 | uint64_t gpu_offset; | 226 | uint64_t gpu_offset; |
| 203 | unsigned rdomain; | 227 | unsigned rdomain; |
| 204 | unsigned wdomain; | 228 | unsigned wdomain; |
| 205 | uint32_t tiling_flags; | 229 | u32 tiling_flags; |
| 206 | }; | 230 | }; |
| 207 | 231 | ||
| 208 | int radeon_object_init(struct radeon_device *rdev); | ||
| 209 | void radeon_object_fini(struct radeon_device *rdev); | ||
| 210 | int radeon_object_create(struct radeon_device *rdev, | ||
| 211 | struct drm_gem_object *gobj, | ||
| 212 | unsigned long size, | ||
| 213 | bool kernel, | ||
| 214 | uint32_t domain, | ||
| 215 | bool interruptible, | ||
| 216 | struct radeon_object **robj_ptr); | ||
| 217 | int radeon_object_kmap(struct radeon_object *robj, void **ptr); | ||
| 218 | void radeon_object_kunmap(struct radeon_object *robj); | ||
| 219 | void radeon_object_unref(struct radeon_object **robj); | ||
| 220 | int radeon_object_pin(struct radeon_object *robj, uint32_t domain, | ||
| 221 | uint64_t *gpu_addr); | ||
| 222 | void radeon_object_unpin(struct radeon_object *robj); | ||
| 223 | int radeon_object_wait(struct radeon_object *robj); | ||
| 224 | int radeon_object_busy_domain(struct radeon_object *robj, uint32_t *cur_placement); | ||
| 225 | int radeon_object_evict_vram(struct radeon_device *rdev); | ||
| 226 | int radeon_object_mmap(struct radeon_object *robj, uint64_t *offset); | ||
| 227 | void radeon_object_force_delete(struct radeon_device *rdev); | ||
| 228 | void radeon_object_list_add_object(struct radeon_object_list *lobj, | ||
| 229 | struct list_head *head); | ||
| 230 | int radeon_object_list_validate(struct list_head *head, void *fence); | ||
| 231 | void radeon_object_list_unvalidate(struct list_head *head); | ||
| 232 | void radeon_object_list_clean(struct list_head *head); | ||
| 233 | int radeon_object_fbdev_mmap(struct radeon_object *robj, | ||
| 234 | struct vm_area_struct *vma); | ||
| 235 | unsigned long radeon_object_size(struct radeon_object *robj); | ||
| 236 | void radeon_object_clear_surface_reg(struct radeon_object *robj); | ||
| 237 | int radeon_object_check_tiling(struct radeon_object *robj, bool has_moved, | ||
| 238 | bool force_drop); | ||
| 239 | void radeon_object_set_tiling_flags(struct radeon_object *robj, | ||
| 240 | uint32_t tiling_flags, uint32_t pitch); | ||
| 241 | void radeon_object_get_tiling_flags(struct radeon_object *robj, uint32_t *tiling_flags, uint32_t *pitch); | ||
| 242 | void radeon_bo_move_notify(struct ttm_buffer_object *bo, | ||
| 243 | struct ttm_mem_reg *mem); | ||
| 244 | void radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo); | ||
| 245 | /* | 232 | /* |
| 246 | * GEM objects. | 233 | * GEM objects. |
| 247 | */ | 234 | */ |
| 248 | struct radeon_gem { | 235 | struct radeon_gem { |
| 236 | struct mutex mutex; | ||
| 249 | struct list_head objects; | 237 | struct list_head objects; |
| 250 | }; | 238 | }; |
| 251 | 239 | ||
| 252 | int radeon_gem_init(struct radeon_device *rdev); | 240 | int radeon_gem_init(struct radeon_device *rdev); |
| 253 | void radeon_gem_fini(struct radeon_device *rdev); | 241 | void radeon_gem_fini(struct radeon_device *rdev); |
| 254 | int radeon_gem_object_create(struct radeon_device *rdev, int size, | 242 | int radeon_gem_object_create(struct radeon_device *rdev, int size, |
| 255 | int alignment, int initial_domain, | 243 | int alignment, int initial_domain, |
| 256 | bool discardable, bool kernel, | 244 | bool discardable, bool kernel, |
| 257 | bool interruptible, | 245 | struct drm_gem_object **obj); |
| 258 | struct drm_gem_object **obj); | ||
| 259 | int radeon_gem_object_pin(struct drm_gem_object *obj, uint32_t pin_domain, | 246 | int radeon_gem_object_pin(struct drm_gem_object *obj, uint32_t pin_domain, |
| 260 | uint64_t *gpu_addr); | 247 | uint64_t *gpu_addr); |
| 261 | void radeon_gem_object_unpin(struct drm_gem_object *obj); | 248 | void radeon_gem_object_unpin(struct drm_gem_object *obj); |
| @@ -271,7 +258,7 @@ struct radeon_gart_table_ram { | |||
| 271 | }; | 258 | }; |
| 272 | 259 | ||
| 273 | struct radeon_gart_table_vram { | 260 | struct radeon_gart_table_vram { |
| 274 | struct radeon_object *robj; | 261 | struct radeon_bo *robj; |
| 275 | volatile uint32_t *ptr; | 262 | volatile uint32_t *ptr; |
| 276 | }; | 263 | }; |
| 277 | 264 | ||
| @@ -352,11 +339,14 @@ struct radeon_irq { | |||
| 352 | bool sw_int; | 339 | bool sw_int; |
| 353 | /* FIXME: use a define max crtc rather than hardcode it */ | 340 | /* FIXME: use a define max crtc rather than hardcode it */ |
| 354 | bool crtc_vblank_int[2]; | 341 | bool crtc_vblank_int[2]; |
| 342 | spinlock_t sw_lock; | ||
| 343 | int sw_refcount; | ||
| 355 | }; | 344 | }; |
| 356 | 345 | ||
| 357 | int radeon_irq_kms_init(struct radeon_device *rdev); | 346 | int radeon_irq_kms_init(struct radeon_device *rdev); |
| 358 | void radeon_irq_kms_fini(struct radeon_device *rdev); | 347 | void radeon_irq_kms_fini(struct radeon_device *rdev); |
| 359 | 348 | void radeon_irq_kms_sw_irq_get(struct radeon_device *rdev); | |
| 349 | void radeon_irq_kms_sw_irq_put(struct radeon_device *rdev); | ||
| 360 | 350 | ||
| 361 | /* | 351 | /* |
| 362 | * CP & ring. | 352 | * CP & ring. |
| @@ -376,7 +366,7 @@ struct radeon_ib { | |||
| 376 | */ | 366 | */ |
| 377 | struct radeon_ib_pool { | 367 | struct radeon_ib_pool { |
| 378 | struct mutex mutex; | 368 | struct mutex mutex; |
| 379 | struct radeon_object *robj; | 369 | struct radeon_bo *robj; |
| 380 | struct list_head scheduled_ibs; | 370 | struct list_head scheduled_ibs; |
| 381 | struct radeon_ib ibs[RADEON_IB_POOL_SIZE]; | 371 | struct radeon_ib ibs[RADEON_IB_POOL_SIZE]; |
| 382 | bool ready; | 372 | bool ready; |
| @@ -384,7 +374,7 @@ struct radeon_ib_pool { | |||
| 384 | }; | 374 | }; |
| 385 | 375 | ||
| 386 | struct radeon_cp { | 376 | struct radeon_cp { |
| 387 | struct radeon_object *ring_obj; | 377 | struct radeon_bo *ring_obj; |
| 388 | volatile uint32_t *ring; | 378 | volatile uint32_t *ring; |
| 389 | unsigned rptr; | 379 | unsigned rptr; |
| 390 | unsigned wptr; | 380 | unsigned wptr; |
| @@ -399,8 +389,25 @@ struct radeon_cp { | |||
| 399 | bool ready; | 389 | bool ready; |
| 400 | }; | 390 | }; |
| 401 | 391 | ||
| 392 | /* | ||
| 393 | * R6xx+ IH ring | ||
| 394 | */ | ||
| 395 | struct r600_ih { | ||
| 396 | struct radeon_bo *ring_obj; | ||
| 397 | volatile uint32_t *ring; | ||
| 398 | unsigned rptr; | ||
| 399 | unsigned wptr; | ||
| 400 | unsigned wptr_old; | ||
| 401 | unsigned ring_size; | ||
| 402 | uint64_t gpu_addr; | ||
| 403 | uint32_t align_mask; | ||
| 404 | uint32_t ptr_mask; | ||
| 405 | spinlock_t lock; | ||
| 406 | bool enabled; | ||
| 407 | }; | ||
| 408 | |||
| 402 | struct r600_blit { | 409 | struct r600_blit { |
| 403 | struct radeon_object *shader_obj; | 410 | struct radeon_bo *shader_obj; |
| 404 | u64 shader_gpu_addr; | 411 | u64 shader_gpu_addr; |
| 405 | u32 vs_offset, ps_offset; | 412 | u32 vs_offset, ps_offset; |
| 406 | u32 state_offset; | 413 | u32 state_offset; |
| @@ -430,8 +437,8 @@ void radeon_ring_fini(struct radeon_device *rdev); | |||
| 430 | */ | 437 | */ |
| 431 | struct radeon_cs_reloc { | 438 | struct radeon_cs_reloc { |
| 432 | struct drm_gem_object *gobj; | 439 | struct drm_gem_object *gobj; |
| 433 | struct radeon_object *robj; | 440 | struct radeon_bo *robj; |
| 434 | struct radeon_object_list lobj; | 441 | struct radeon_bo_list lobj; |
| 435 | uint32_t handle; | 442 | uint32_t handle; |
| 436 | uint32_t flags; | 443 | uint32_t flags; |
| 437 | }; | 444 | }; |
| @@ -527,7 +534,7 @@ void radeon_agp_fini(struct radeon_device *rdev); | |||
| 527 | * Writeback | 534 | * Writeback |
| 528 | */ | 535 | */ |
| 529 | struct radeon_wb { | 536 | struct radeon_wb { |
| 530 | struct radeon_object *wb_obj; | 537 | struct radeon_bo *wb_obj; |
| 531 | volatile uint32_t *wb; | 538 | volatile uint32_t *wb; |
| 532 | uint64_t gpu_addr; | 539 | uint64_t gpu_addr; |
| 533 | }; | 540 | }; |
| @@ -639,6 +646,7 @@ struct radeon_asic { | |||
| 639 | uint32_t offset, uint32_t obj_size); | 646 | uint32_t offset, uint32_t obj_size); |
| 640 | int (*clear_surface_reg)(struct radeon_device *rdev, int reg); | 647 | int (*clear_surface_reg)(struct radeon_device *rdev, int reg); |
| 641 | void (*bandwidth_update)(struct radeon_device *rdev); | 648 | void (*bandwidth_update)(struct radeon_device *rdev); |
| 649 | void (*hdp_flush)(struct radeon_device *rdev); | ||
| 642 | }; | 650 | }; |
| 643 | 651 | ||
| 644 | /* | 652 | /* |
| @@ -751,9 +759,9 @@ struct radeon_device { | |||
| 751 | uint8_t *bios; | 759 | uint8_t *bios; |
| 752 | bool is_atom_bios; | 760 | bool is_atom_bios; |
| 753 | uint16_t bios_header_start; | 761 | uint16_t bios_header_start; |
| 754 | struct radeon_object *stollen_vga_memory; | 762 | struct radeon_bo *stollen_vga_memory; |
| 755 | struct fb_info *fbdev_info; | 763 | struct fb_info *fbdev_info; |
| 756 | struct radeon_object *fbdev_robj; | 764 | struct radeon_bo *fbdev_rbo; |
| 757 | struct radeon_framebuffer *fbdev_rfb; | 765 | struct radeon_framebuffer *fbdev_rfb; |
| 758 | /* Register mmio */ | 766 | /* Register mmio */ |
| 759 | resource_size_t rmmio_base; | 767 | resource_size_t rmmio_base; |
| @@ -791,8 +799,10 @@ struct radeon_device { | |||
| 791 | struct radeon_surface_reg surface_regs[RADEON_GEM_MAX_SURFACES]; | 799 | struct radeon_surface_reg surface_regs[RADEON_GEM_MAX_SURFACES]; |
| 792 | const struct firmware *me_fw; /* all family ME firmware */ | 800 | const struct firmware *me_fw; /* all family ME firmware */ |
| 793 | const struct firmware *pfp_fw; /* r6/700 PFP firmware */ | 801 | const struct firmware *pfp_fw; /* r6/700 PFP firmware */ |
| 802 | const struct firmware *rlc_fw; /* r6/700 RLC firmware */ | ||
| 794 | struct r600_blit r600_blit; | 803 | struct r600_blit r600_blit; |
| 795 | int msi_enabled; /* msi enabled */ | 804 | int msi_enabled; /* msi enabled */ |
| 805 | struct r600_ih ih; /* r6/700 interrupt ring */ | ||
| 796 | }; | 806 | }; |
| 797 | 807 | ||
| 798 | int radeon_device_init(struct radeon_device *rdev, | 808 | int radeon_device_init(struct radeon_device *rdev, |
| @@ -829,6 +839,10 @@ static inline void r100_mm_wreg(struct radeon_device *rdev, uint32_t reg, uint32 | |||
| 829 | } | 839 | } |
| 830 | } | 840 | } |
| 831 | 841 | ||
| 842 | /* | ||
| 843 | * Cast helper | ||
| 844 | */ | ||
| 845 | #define to_radeon_fence(p) ((struct radeon_fence *)(p)) | ||
| 832 | 846 | ||
| 833 | /* | 847 | /* |
| 834 | * Registers read & write functions. | 848 | * Registers read & write functions. |
| @@ -965,18 +979,20 @@ static inline void radeon_ring_write(struct radeon_device *rdev, uint32_t v) | |||
| 965 | #define radeon_get_engine_clock(rdev) (rdev)->asic->get_engine_clock((rdev)) | 979 | #define radeon_get_engine_clock(rdev) (rdev)->asic->get_engine_clock((rdev)) |
| 966 | #define radeon_set_engine_clock(rdev, e) (rdev)->asic->set_engine_clock((rdev), (e)) | 980 | #define radeon_set_engine_clock(rdev, e) (rdev)->asic->set_engine_clock((rdev), (e)) |
| 967 | #define radeon_get_memory_clock(rdev) (rdev)->asic->get_memory_clock((rdev)) | 981 | #define radeon_get_memory_clock(rdev) (rdev)->asic->get_memory_clock((rdev)) |
| 968 | #define radeon_set_memory_clock(rdev, e) (rdev)->asic->set_engine_clock((rdev), (e)) | 982 | #define radeon_set_memory_clock(rdev, e) (rdev)->asic->set_memory_clock((rdev), (e)) |
| 969 | #define radeon_set_pcie_lanes(rdev, l) (rdev)->asic->set_pcie_lanes((rdev), (l)) | 983 | #define radeon_set_pcie_lanes(rdev, l) (rdev)->asic->set_pcie_lanes((rdev), (l)) |
| 970 | #define radeon_set_clock_gating(rdev, e) (rdev)->asic->set_clock_gating((rdev), (e)) | 984 | #define radeon_set_clock_gating(rdev, e) (rdev)->asic->set_clock_gating((rdev), (e)) |
| 971 | #define radeon_set_surface_reg(rdev, r, f, p, o, s) ((rdev)->asic->set_surface_reg((rdev), (r), (f), (p), (o), (s))) | 985 | #define radeon_set_surface_reg(rdev, r, f, p, o, s) ((rdev)->asic->set_surface_reg((rdev), (r), (f), (p), (o), (s))) |
| 972 | #define radeon_clear_surface_reg(rdev, r) ((rdev)->asic->clear_surface_reg((rdev), (r))) | 986 | #define radeon_clear_surface_reg(rdev, r) ((rdev)->asic->clear_surface_reg((rdev), (r))) |
| 973 | #define radeon_bandwidth_update(rdev) (rdev)->asic->bandwidth_update((rdev)) | 987 | #define radeon_bandwidth_update(rdev) (rdev)->asic->bandwidth_update((rdev)) |
| 988 | #define radeon_hdp_flush(rdev) (rdev)->asic->hdp_flush((rdev)) | ||
| 974 | 989 | ||
| 975 | /* Common functions */ | 990 | /* Common functions */ |
| 976 | extern int radeon_gart_table_vram_pin(struct radeon_device *rdev); | 991 | extern int radeon_gart_table_vram_pin(struct radeon_device *rdev); |
| 977 | extern int radeon_modeset_init(struct radeon_device *rdev); | 992 | extern int radeon_modeset_init(struct radeon_device *rdev); |
| 978 | extern void radeon_modeset_fini(struct radeon_device *rdev); | 993 | extern void radeon_modeset_fini(struct radeon_device *rdev); |
| 979 | extern bool radeon_card_posted(struct radeon_device *rdev); | 994 | extern bool radeon_card_posted(struct radeon_device *rdev); |
| 995 | extern bool radeon_boot_test_post_card(struct radeon_device *rdev); | ||
| 980 | extern int radeon_clocks_init(struct radeon_device *rdev); | 996 | extern int radeon_clocks_init(struct radeon_device *rdev); |
| 981 | extern void radeon_clocks_fini(struct radeon_device *rdev); | 997 | extern void radeon_clocks_fini(struct radeon_device *rdev); |
| 982 | extern void radeon_scratch_init(struct radeon_device *rdev); | 998 | extern void radeon_scratch_init(struct radeon_device *rdev); |
| @@ -1021,7 +1037,7 @@ extern int r100_cp_reset(struct radeon_device *rdev); | |||
| 1021 | extern void r100_vga_render_disable(struct radeon_device *rdev); | 1037 | extern void r100_vga_render_disable(struct radeon_device *rdev); |
| 1022 | extern int r100_cs_track_check_pkt3_indx_buffer(struct radeon_cs_parser *p, | 1038 | extern int r100_cs_track_check_pkt3_indx_buffer(struct radeon_cs_parser *p, |
| 1023 | struct radeon_cs_packet *pkt, | 1039 | struct radeon_cs_packet *pkt, |
| 1024 | struct radeon_object *robj); | 1040 | struct radeon_bo *robj); |
| 1025 | extern int r100_cs_parse_packet0(struct radeon_cs_parser *p, | 1041 | extern int r100_cs_parse_packet0(struct radeon_cs_parser *p, |
| 1026 | struct radeon_cs_packet *pkt, | 1042 | struct radeon_cs_packet *pkt, |
| 1027 | const unsigned *auth, unsigned n, | 1043 | const unsigned *auth, unsigned n, |
| @@ -1029,6 +1045,8 @@ extern int r100_cs_parse_packet0(struct radeon_cs_parser *p, | |||
| 1029 | extern int r100_cs_packet_parse(struct radeon_cs_parser *p, | 1045 | extern int r100_cs_packet_parse(struct radeon_cs_parser *p, |
| 1030 | struct radeon_cs_packet *pkt, | 1046 | struct radeon_cs_packet *pkt, |
| 1031 | unsigned idx); | 1047 | unsigned idx); |
| 1048 | extern void r100_enable_bm(struct radeon_device *rdev); | ||
| 1049 | extern void r100_set_common_regs(struct radeon_device *rdev); | ||
| 1032 | 1050 | ||
| 1033 | /* rv200,rv250,rv280 */ | 1051 | /* rv200,rv250,rv280 */ |
| 1034 | extern void r200_set_safe_registers(struct radeon_device *rdev); | 1052 | extern void r200_set_safe_registers(struct radeon_device *rdev); |
| @@ -1104,7 +1122,14 @@ extern void r600_wb_disable(struct radeon_device *rdev); | |||
| 1104 | extern void r600_scratch_init(struct radeon_device *rdev); | 1122 | extern void r600_scratch_init(struct radeon_device *rdev); |
| 1105 | extern int r600_blit_init(struct radeon_device *rdev); | 1123 | extern int r600_blit_init(struct radeon_device *rdev); |
| 1106 | extern void r600_blit_fini(struct radeon_device *rdev); | 1124 | extern void r600_blit_fini(struct radeon_device *rdev); |
| 1107 | extern int r600_cp_init_microcode(struct radeon_device *rdev); | 1125 | extern int r600_init_microcode(struct radeon_device *rdev); |
| 1108 | extern int r600_gpu_reset(struct radeon_device *rdev); | 1126 | extern int r600_gpu_reset(struct radeon_device *rdev); |
| 1127 | /* r600 irq */ | ||
| 1128 | extern int r600_irq_init(struct radeon_device *rdev); | ||
| 1129 | extern void r600_irq_fini(struct radeon_device *rdev); | ||
| 1130 | extern void r600_ih_ring_init(struct radeon_device *rdev, unsigned ring_size); | ||
| 1131 | extern int r600_irq_set(struct radeon_device *rdev); | ||
| 1132 | |||
| 1133 | #include "radeon_object.h" | ||
| 1109 | 1134 | ||
| 1110 | #endif | 1135 | #endif |
diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h index c18fbee387d7..755f50555c3d 100644 --- a/drivers/gpu/drm/radeon/radeon_asic.h +++ b/drivers/gpu/drm/radeon/radeon_asic.h | |||
| @@ -76,6 +76,7 @@ int r100_clear_surface_reg(struct radeon_device *rdev, int reg); | |||
| 76 | void r100_bandwidth_update(struct radeon_device *rdev); | 76 | void r100_bandwidth_update(struct radeon_device *rdev); |
| 77 | void r100_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib); | 77 | void r100_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib); |
| 78 | int r100_ring_test(struct radeon_device *rdev); | 78 | int r100_ring_test(struct radeon_device *rdev); |
| 79 | void r100_hdp_flush(struct radeon_device *rdev); | ||
| 79 | 80 | ||
| 80 | static struct radeon_asic r100_asic = { | 81 | static struct radeon_asic r100_asic = { |
| 81 | .init = &r100_init, | 82 | .init = &r100_init, |
| @@ -107,6 +108,7 @@ static struct radeon_asic r100_asic = { | |||
| 107 | .set_surface_reg = r100_set_surface_reg, | 108 | .set_surface_reg = r100_set_surface_reg, |
| 108 | .clear_surface_reg = r100_clear_surface_reg, | 109 | .clear_surface_reg = r100_clear_surface_reg, |
| 109 | .bandwidth_update = &r100_bandwidth_update, | 110 | .bandwidth_update = &r100_bandwidth_update, |
| 111 | .hdp_flush = &r100_hdp_flush, | ||
| 110 | }; | 112 | }; |
| 111 | 113 | ||
| 112 | 114 | ||
| @@ -162,6 +164,7 @@ static struct radeon_asic r300_asic = { | |||
| 162 | .set_surface_reg = r100_set_surface_reg, | 164 | .set_surface_reg = r100_set_surface_reg, |
| 163 | .clear_surface_reg = r100_clear_surface_reg, | 165 | .clear_surface_reg = r100_clear_surface_reg, |
| 164 | .bandwidth_update = &r100_bandwidth_update, | 166 | .bandwidth_update = &r100_bandwidth_update, |
| 167 | .hdp_flush = &r100_hdp_flush, | ||
| 165 | }; | 168 | }; |
| 166 | 169 | ||
| 167 | /* | 170 | /* |
| @@ -201,6 +204,7 @@ static struct radeon_asic r420_asic = { | |||
| 201 | .set_surface_reg = r100_set_surface_reg, | 204 | .set_surface_reg = r100_set_surface_reg, |
| 202 | .clear_surface_reg = r100_clear_surface_reg, | 205 | .clear_surface_reg = r100_clear_surface_reg, |
| 203 | .bandwidth_update = &r100_bandwidth_update, | 206 | .bandwidth_update = &r100_bandwidth_update, |
| 207 | .hdp_flush = &r100_hdp_flush, | ||
| 204 | }; | 208 | }; |
| 205 | 209 | ||
| 206 | 210 | ||
| @@ -245,6 +249,7 @@ static struct radeon_asic rs400_asic = { | |||
| 245 | .set_surface_reg = r100_set_surface_reg, | 249 | .set_surface_reg = r100_set_surface_reg, |
| 246 | .clear_surface_reg = r100_clear_surface_reg, | 250 | .clear_surface_reg = r100_clear_surface_reg, |
| 247 | .bandwidth_update = &r100_bandwidth_update, | 251 | .bandwidth_update = &r100_bandwidth_update, |
| 252 | .hdp_flush = &r100_hdp_flush, | ||
| 248 | }; | 253 | }; |
| 249 | 254 | ||
| 250 | 255 | ||
| @@ -291,6 +296,7 @@ static struct radeon_asic rs600_asic = { | |||
| 291 | .set_pcie_lanes = NULL, | 296 | .set_pcie_lanes = NULL, |
| 292 | .set_clock_gating = &radeon_atom_set_clock_gating, | 297 | .set_clock_gating = &radeon_atom_set_clock_gating, |
| 293 | .bandwidth_update = &rs600_bandwidth_update, | 298 | .bandwidth_update = &rs600_bandwidth_update, |
| 299 | .hdp_flush = &r100_hdp_flush, | ||
| 294 | }; | 300 | }; |
| 295 | 301 | ||
| 296 | 302 | ||
| @@ -334,6 +340,7 @@ static struct radeon_asic rs690_asic = { | |||
| 334 | .set_surface_reg = r100_set_surface_reg, | 340 | .set_surface_reg = r100_set_surface_reg, |
| 335 | .clear_surface_reg = r100_clear_surface_reg, | 341 | .clear_surface_reg = r100_clear_surface_reg, |
| 336 | .bandwidth_update = &rs690_bandwidth_update, | 342 | .bandwidth_update = &rs690_bandwidth_update, |
| 343 | .hdp_flush = &r100_hdp_flush, | ||
| 337 | }; | 344 | }; |
| 338 | 345 | ||
| 339 | 346 | ||
| @@ -381,6 +388,7 @@ static struct radeon_asic rv515_asic = { | |||
| 381 | .set_surface_reg = r100_set_surface_reg, | 388 | .set_surface_reg = r100_set_surface_reg, |
| 382 | .clear_surface_reg = r100_clear_surface_reg, | 389 | .clear_surface_reg = r100_clear_surface_reg, |
| 383 | .bandwidth_update = &rv515_bandwidth_update, | 390 | .bandwidth_update = &rv515_bandwidth_update, |
| 391 | .hdp_flush = &r100_hdp_flush, | ||
| 384 | }; | 392 | }; |
| 385 | 393 | ||
| 386 | 394 | ||
| @@ -419,6 +427,7 @@ static struct radeon_asic r520_asic = { | |||
| 419 | .set_surface_reg = r100_set_surface_reg, | 427 | .set_surface_reg = r100_set_surface_reg, |
| 420 | .clear_surface_reg = r100_clear_surface_reg, | 428 | .clear_surface_reg = r100_clear_surface_reg, |
| 421 | .bandwidth_update = &rv515_bandwidth_update, | 429 | .bandwidth_update = &rv515_bandwidth_update, |
| 430 | .hdp_flush = &r100_hdp_flush, | ||
| 422 | }; | 431 | }; |
| 423 | 432 | ||
| 424 | /* | 433 | /* |
| @@ -455,6 +464,7 @@ int r600_ring_test(struct radeon_device *rdev); | |||
| 455 | int r600_copy_blit(struct radeon_device *rdev, | 464 | int r600_copy_blit(struct radeon_device *rdev, |
| 456 | uint64_t src_offset, uint64_t dst_offset, | 465 | uint64_t src_offset, uint64_t dst_offset, |
| 457 | unsigned num_pages, struct radeon_fence *fence); | 466 | unsigned num_pages, struct radeon_fence *fence); |
| 467 | void r600_hdp_flush(struct radeon_device *rdev); | ||
| 458 | 468 | ||
| 459 | static struct radeon_asic r600_asic = { | 469 | static struct radeon_asic r600_asic = { |
| 460 | .init = &r600_init, | 470 | .init = &r600_init, |
| @@ -470,6 +480,7 @@ static struct radeon_asic r600_asic = { | |||
| 470 | .ring_ib_execute = &r600_ring_ib_execute, | 480 | .ring_ib_execute = &r600_ring_ib_execute, |
| 471 | .irq_set = &r600_irq_set, | 481 | .irq_set = &r600_irq_set, |
| 472 | .irq_process = &r600_irq_process, | 482 | .irq_process = &r600_irq_process, |
| 483 | .get_vblank_counter = &rs600_get_vblank_counter, | ||
| 473 | .fence_ring_emit = &r600_fence_ring_emit, | 484 | .fence_ring_emit = &r600_fence_ring_emit, |
| 474 | .cs_parse = &r600_cs_parse, | 485 | .cs_parse = &r600_cs_parse, |
| 475 | .copy_blit = &r600_copy_blit, | 486 | .copy_blit = &r600_copy_blit, |
| @@ -484,6 +495,7 @@ static struct radeon_asic r600_asic = { | |||
| 484 | .set_surface_reg = r600_set_surface_reg, | 495 | .set_surface_reg = r600_set_surface_reg, |
| 485 | .clear_surface_reg = r600_clear_surface_reg, | 496 | .clear_surface_reg = r600_clear_surface_reg, |
| 486 | .bandwidth_update = &rv515_bandwidth_update, | 497 | .bandwidth_update = &rv515_bandwidth_update, |
| 498 | .hdp_flush = &r600_hdp_flush, | ||
| 487 | }; | 499 | }; |
| 488 | 500 | ||
| 489 | /* | 501 | /* |
| @@ -509,6 +521,7 @@ static struct radeon_asic rv770_asic = { | |||
| 509 | .ring_ib_execute = &r600_ring_ib_execute, | 521 | .ring_ib_execute = &r600_ring_ib_execute, |
| 510 | .irq_set = &r600_irq_set, | 522 | .irq_set = &r600_irq_set, |
| 511 | .irq_process = &r600_irq_process, | 523 | .irq_process = &r600_irq_process, |
| 524 | .get_vblank_counter = &rs600_get_vblank_counter, | ||
| 512 | .fence_ring_emit = &r600_fence_ring_emit, | 525 | .fence_ring_emit = &r600_fence_ring_emit, |
| 513 | .cs_parse = &r600_cs_parse, | 526 | .cs_parse = &r600_cs_parse, |
| 514 | .copy_blit = &r600_copy_blit, | 527 | .copy_blit = &r600_copy_blit, |
| @@ -523,6 +536,7 @@ static struct radeon_asic rv770_asic = { | |||
| 523 | .set_surface_reg = r600_set_surface_reg, | 536 | .set_surface_reg = r600_set_surface_reg, |
| 524 | .clear_surface_reg = r600_clear_surface_reg, | 537 | .clear_surface_reg = r600_clear_surface_reg, |
| 525 | .bandwidth_update = &rv515_bandwidth_update, | 538 | .bandwidth_update = &rv515_bandwidth_update, |
| 539 | .hdp_flush = &r600_hdp_flush, | ||
| 526 | }; | 540 | }; |
| 527 | 541 | ||
| 528 | #endif | 542 | #endif |
diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c index 2ed88a820935..5e414102c875 100644 --- a/drivers/gpu/drm/radeon/radeon_atombios.c +++ b/drivers/gpu/drm/radeon/radeon_atombios.c | |||
| @@ -82,18 +82,18 @@ static inline struct radeon_i2c_bus_rec radeon_lookup_gpio(struct drm_device | |||
| 82 | 82 | ||
| 83 | i2c.mask_clk_reg = le16_to_cpu(gpio.usClkMaskRegisterIndex) * 4; | 83 | i2c.mask_clk_reg = le16_to_cpu(gpio.usClkMaskRegisterIndex) * 4; |
| 84 | i2c.mask_data_reg = le16_to_cpu(gpio.usDataMaskRegisterIndex) * 4; | 84 | i2c.mask_data_reg = le16_to_cpu(gpio.usDataMaskRegisterIndex) * 4; |
| 85 | i2c.put_clk_reg = le16_to_cpu(gpio.usClkEnRegisterIndex) * 4; | 85 | i2c.en_clk_reg = le16_to_cpu(gpio.usClkEnRegisterIndex) * 4; |
| 86 | i2c.put_data_reg = le16_to_cpu(gpio.usDataEnRegisterIndex) * 4; | 86 | i2c.en_data_reg = le16_to_cpu(gpio.usDataEnRegisterIndex) * 4; |
| 87 | i2c.get_clk_reg = le16_to_cpu(gpio.usClkY_RegisterIndex) * 4; | 87 | i2c.y_clk_reg = le16_to_cpu(gpio.usClkY_RegisterIndex) * 4; |
| 88 | i2c.get_data_reg = le16_to_cpu(gpio.usDataY_RegisterIndex) * 4; | 88 | i2c.y_data_reg = le16_to_cpu(gpio.usDataY_RegisterIndex) * 4; |
| 89 | i2c.a_clk_reg = le16_to_cpu(gpio.usClkA_RegisterIndex) * 4; | 89 | i2c.a_clk_reg = le16_to_cpu(gpio.usClkA_RegisterIndex) * 4; |
| 90 | i2c.a_data_reg = le16_to_cpu(gpio.usDataA_RegisterIndex) * 4; | 90 | i2c.a_data_reg = le16_to_cpu(gpio.usDataA_RegisterIndex) * 4; |
| 91 | i2c.mask_clk_mask = (1 << gpio.ucClkMaskShift); | 91 | i2c.mask_clk_mask = (1 << gpio.ucClkMaskShift); |
| 92 | i2c.mask_data_mask = (1 << gpio.ucDataMaskShift); | 92 | i2c.mask_data_mask = (1 << gpio.ucDataMaskShift); |
| 93 | i2c.put_clk_mask = (1 << gpio.ucClkEnShift); | 93 | i2c.en_clk_mask = (1 << gpio.ucClkEnShift); |
| 94 | i2c.put_data_mask = (1 << gpio.ucDataEnShift); | 94 | i2c.en_data_mask = (1 << gpio.ucDataEnShift); |
| 95 | i2c.get_clk_mask = (1 << gpio.ucClkY_Shift); | 95 | i2c.y_clk_mask = (1 << gpio.ucClkY_Shift); |
| 96 | i2c.get_data_mask = (1 << gpio.ucDataY_Shift); | 96 | i2c.y_data_mask = (1 << gpio.ucDataY_Shift); |
| 97 | i2c.a_clk_mask = (1 << gpio.ucClkA_Shift); | 97 | i2c.a_clk_mask = (1 << gpio.ucClkA_Shift); |
| 98 | i2c.a_data_mask = (1 << gpio.ucDataA_Shift); | 98 | i2c.a_data_mask = (1 << gpio.ucDataA_Shift); |
| 99 | i2c.valid = true; | 99 | i2c.valid = true; |
| @@ -135,6 +135,23 @@ static bool radeon_atom_apply_quirks(struct drm_device *dev, | |||
| 135 | } | 135 | } |
| 136 | } | 136 | } |
| 137 | 137 | ||
| 138 | /* HIS X1300 is DVI+VGA, not DVI+DVI */ | ||
| 139 | if ((dev->pdev->device == 0x7146) && | ||
| 140 | (dev->pdev->subsystem_vendor == 0x17af) && | ||
| 141 | (dev->pdev->subsystem_device == 0x2058)) { | ||
| 142 | if (supported_device == ATOM_DEVICE_DFP1_SUPPORT) | ||
| 143 | return false; | ||
| 144 | } | ||
| 145 | |||
| 146 | /* Gigabyte X1300 is DVI+VGA, not DVI+DVI */ | ||
| 147 | if ((dev->pdev->device == 0x7142) && | ||
| 148 | (dev->pdev->subsystem_vendor == 0x1458) && | ||
| 149 | (dev->pdev->subsystem_device == 0x2134)) { | ||
| 150 | if (supported_device == ATOM_DEVICE_DFP1_SUPPORT) | ||
| 151 | return false; | ||
| 152 | } | ||
| 153 | |||
| 154 | |||
| 138 | /* Funky macbooks */ | 155 | /* Funky macbooks */ |
| 139 | if ((dev->pdev->device == 0x71C5) && | 156 | if ((dev->pdev->device == 0x71C5) && |
| 140 | (dev->pdev->subsystem_vendor == 0x106b) && | 157 | (dev->pdev->subsystem_vendor == 0x106b) && |
| @@ -172,6 +189,15 @@ static bool radeon_atom_apply_quirks(struct drm_device *dev, | |||
| 172 | } | 189 | } |
| 173 | } | 190 | } |
| 174 | 191 | ||
| 192 | /* Acer laptop reports DVI-D as DVI-I */ | ||
| 193 | if ((dev->pdev->device == 0x95c4) && | ||
| 194 | (dev->pdev->subsystem_vendor == 0x1025) && | ||
| 195 | (dev->pdev->subsystem_device == 0x013c)) { | ||
| 196 | if ((*connector_type == DRM_MODE_CONNECTOR_DVII) && | ||
| 197 | (supported_device == ATOM_DEVICE_DFP1_SUPPORT)) | ||
| 198 | *connector_type = DRM_MODE_CONNECTOR_DVID; | ||
| 199 | } | ||
| 200 | |||
| 175 | return true; | 201 | return true; |
| 176 | } | 202 | } |
| 177 | 203 | ||
| @@ -901,7 +927,7 @@ struct radeon_encoder_atom_dig *radeon_atombios_get_lvds_info(struct | |||
| 901 | struct radeon_device *rdev = dev->dev_private; | 927 | struct radeon_device *rdev = dev->dev_private; |
| 902 | struct radeon_mode_info *mode_info = &rdev->mode_info; | 928 | struct radeon_mode_info *mode_info = &rdev->mode_info; |
| 903 | int index = GetIndexIntoMasterTable(DATA, LVDS_Info); | 929 | int index = GetIndexIntoMasterTable(DATA, LVDS_Info); |
| 904 | uint16_t data_offset; | 930 | uint16_t data_offset, misc; |
| 905 | union lvds_info *lvds_info; | 931 | union lvds_info *lvds_info; |
| 906 | uint8_t frev, crev; | 932 | uint8_t frev, crev; |
| 907 | struct radeon_encoder_atom_dig *lvds = NULL; | 933 | struct radeon_encoder_atom_dig *lvds = NULL; |
| @@ -940,6 +966,19 @@ struct radeon_encoder_atom_dig *radeon_atombios_get_lvds_info(struct | |||
| 940 | lvds->panel_pwr_delay = | 966 | lvds->panel_pwr_delay = |
| 941 | le16_to_cpu(lvds_info->info.usOffDelayInMs); | 967 | le16_to_cpu(lvds_info->info.usOffDelayInMs); |
| 942 | lvds->lvds_misc = lvds_info->info.ucLVDS_Misc; | 968 | lvds->lvds_misc = lvds_info->info.ucLVDS_Misc; |
| 969 | |||
| 970 | misc = le16_to_cpu(lvds_info->info.sLCDTiming.susModeMiscInfo.usAccess); | ||
| 971 | if (misc & ATOM_VSYNC_POLARITY) | ||
| 972 | lvds->native_mode.flags |= DRM_MODE_FLAG_NVSYNC; | ||
| 973 | if (misc & ATOM_HSYNC_POLARITY) | ||
| 974 | lvds->native_mode.flags |= DRM_MODE_FLAG_NHSYNC; | ||
| 975 | if (misc & ATOM_COMPOSITESYNC) | ||
| 976 | lvds->native_mode.flags |= DRM_MODE_FLAG_CSYNC; | ||
| 977 | if (misc & ATOM_INTERLACE) | ||
| 978 | lvds->native_mode.flags |= DRM_MODE_FLAG_INTERLACE; | ||
| 979 | if (misc & ATOM_DOUBLE_CLOCK_MODE) | ||
| 980 | lvds->native_mode.flags |= DRM_MODE_FLAG_DBLSCAN; | ||
| 981 | |||
| 943 | /* set crtc values */ | 982 | /* set crtc values */ |
| 944 | drm_mode_set_crtcinfo(&lvds->native_mode, CRTC_INTERLACE_HALVE_V); | 983 | drm_mode_set_crtcinfo(&lvds->native_mode, CRTC_INTERLACE_HALVE_V); |
| 945 | 984 | ||
diff --git a/drivers/gpu/drm/radeon/radeon_benchmark.c b/drivers/gpu/drm/radeon/radeon_benchmark.c index 10bd50a7db87..4ddfd4b5bc51 100644 --- a/drivers/gpu/drm/radeon/radeon_benchmark.c +++ b/drivers/gpu/drm/radeon/radeon_benchmark.c | |||
| @@ -29,8 +29,8 @@ | |||
| 29 | void radeon_benchmark_move(struct radeon_device *rdev, unsigned bsize, | 29 | void radeon_benchmark_move(struct radeon_device *rdev, unsigned bsize, |
| 30 | unsigned sdomain, unsigned ddomain) | 30 | unsigned sdomain, unsigned ddomain) |
| 31 | { | 31 | { |
| 32 | struct radeon_object *dobj = NULL; | 32 | struct radeon_bo *dobj = NULL; |
| 33 | struct radeon_object *sobj = NULL; | 33 | struct radeon_bo *sobj = NULL; |
| 34 | struct radeon_fence *fence = NULL; | 34 | struct radeon_fence *fence = NULL; |
| 35 | uint64_t saddr, daddr; | 35 | uint64_t saddr, daddr; |
| 36 | unsigned long start_jiffies; | 36 | unsigned long start_jiffies; |
| @@ -41,19 +41,27 @@ void radeon_benchmark_move(struct radeon_device *rdev, unsigned bsize, | |||
| 41 | 41 | ||
| 42 | size = bsize; | 42 | size = bsize; |
| 43 | n = 1024; | 43 | n = 1024; |
| 44 | r = radeon_object_create(rdev, NULL, size, true, sdomain, false, &sobj); | 44 | r = radeon_bo_create(rdev, NULL, size, true, sdomain, &sobj); |
| 45 | if (r) { | 45 | if (r) { |
| 46 | goto out_cleanup; | 46 | goto out_cleanup; |
| 47 | } | 47 | } |
| 48 | r = radeon_object_pin(sobj, sdomain, &saddr); | 48 | r = radeon_bo_reserve(sobj, false); |
| 49 | if (unlikely(r != 0)) | ||
| 50 | goto out_cleanup; | ||
| 51 | r = radeon_bo_pin(sobj, sdomain, &saddr); | ||
| 52 | radeon_bo_unreserve(sobj); | ||
| 49 | if (r) { | 53 | if (r) { |
| 50 | goto out_cleanup; | 54 | goto out_cleanup; |
| 51 | } | 55 | } |
| 52 | r = radeon_object_create(rdev, NULL, size, true, ddomain, false, &dobj); | 56 | r = radeon_bo_create(rdev, NULL, size, true, ddomain, &dobj); |
| 53 | if (r) { | 57 | if (r) { |
| 54 | goto out_cleanup; | 58 | goto out_cleanup; |
| 55 | } | 59 | } |
| 56 | r = radeon_object_pin(dobj, ddomain, &daddr); | 60 | r = radeon_bo_reserve(dobj, false); |
| 61 | if (unlikely(r != 0)) | ||
| 62 | goto out_cleanup; | ||
| 63 | r = radeon_bo_pin(dobj, ddomain, &daddr); | ||
| 64 | radeon_bo_unreserve(dobj); | ||
| 57 | if (r) { | 65 | if (r) { |
| 58 | goto out_cleanup; | 66 | goto out_cleanup; |
| 59 | } | 67 | } |
| @@ -109,12 +117,20 @@ void radeon_benchmark_move(struct radeon_device *rdev, unsigned bsize, | |||
| 109 | } | 117 | } |
| 110 | out_cleanup: | 118 | out_cleanup: |
| 111 | if (sobj) { | 119 | if (sobj) { |
| 112 | radeon_object_unpin(sobj); | 120 | r = radeon_bo_reserve(sobj, false); |
| 113 | radeon_object_unref(&sobj); | 121 | if (likely(r == 0)) { |
| 122 | radeon_bo_unpin(sobj); | ||
| 123 | radeon_bo_unreserve(sobj); | ||
| 124 | } | ||
| 125 | radeon_bo_unref(&sobj); | ||
| 114 | } | 126 | } |
| 115 | if (dobj) { | 127 | if (dobj) { |
| 116 | radeon_object_unpin(dobj); | 128 | r = radeon_bo_reserve(dobj, false); |
| 117 | radeon_object_unref(&dobj); | 129 | if (likely(r == 0)) { |
| 130 | radeon_bo_unpin(dobj); | ||
| 131 | radeon_bo_unreserve(dobj); | ||
| 132 | } | ||
| 133 | radeon_bo_unref(&dobj); | ||
| 118 | } | 134 | } |
| 119 | if (fence) { | 135 | if (fence) { |
| 120 | radeon_fence_unref(&fence); | 136 | radeon_fence_unref(&fence); |
diff --git a/drivers/gpu/drm/radeon/radeon_clocks.c b/drivers/gpu/drm/radeon/radeon_clocks.c index a81354167621..b062109efbee 100644 --- a/drivers/gpu/drm/radeon/radeon_clocks.c +++ b/drivers/gpu/drm/radeon/radeon_clocks.c | |||
| @@ -44,6 +44,10 @@ uint32_t radeon_legacy_get_engine_clock(struct radeon_device *rdev) | |||
| 44 | 44 | ||
| 45 | ref_div = | 45 | ref_div = |
| 46 | RREG32_PLL(RADEON_M_SPLL_REF_FB_DIV) & RADEON_M_SPLL_REF_DIV_MASK; | 46 | RREG32_PLL(RADEON_M_SPLL_REF_FB_DIV) & RADEON_M_SPLL_REF_DIV_MASK; |
| 47 | |||
| 48 | if (ref_div == 0) | ||
| 49 | return 0; | ||
| 50 | |||
| 47 | sclk = fb_div / ref_div; | 51 | sclk = fb_div / ref_div; |
| 48 | 52 | ||
| 49 | post_div = RREG32_PLL(RADEON_SCLK_CNTL) & RADEON_SCLK_SRC_SEL_MASK; | 53 | post_div = RREG32_PLL(RADEON_SCLK_CNTL) & RADEON_SCLK_SRC_SEL_MASK; |
| @@ -70,6 +74,10 @@ static uint32_t radeon_legacy_get_memory_clock(struct radeon_device *rdev) | |||
| 70 | 74 | ||
| 71 | ref_div = | 75 | ref_div = |
| 72 | RREG32_PLL(RADEON_M_SPLL_REF_FB_DIV) & RADEON_M_SPLL_REF_DIV_MASK; | 76 | RREG32_PLL(RADEON_M_SPLL_REF_FB_DIV) & RADEON_M_SPLL_REF_DIV_MASK; |
| 77 | |||
| 78 | if (ref_div == 0) | ||
| 79 | return 0; | ||
| 80 | |||
| 73 | mclk = fb_div / ref_div; | 81 | mclk = fb_div / ref_div; |
| 74 | 82 | ||
| 75 | post_div = RREG32_PLL(RADEON_MCLK_CNTL) & 0x7; | 83 | post_div = RREG32_PLL(RADEON_MCLK_CNTL) & 0x7; |
| @@ -98,8 +106,19 @@ void radeon_get_clock_info(struct drm_device *dev) | |||
| 98 | ret = radeon_combios_get_clock_info(dev); | 106 | ret = radeon_combios_get_clock_info(dev); |
| 99 | 107 | ||
| 100 | if (ret) { | 108 | if (ret) { |
| 101 | if (p1pll->reference_div < 2) | 109 | if (p1pll->reference_div < 2) { |
| 102 | p1pll->reference_div = 12; | 110 | if (!ASIC_IS_AVIVO(rdev)) { |
| 111 | u32 tmp = RREG32_PLL(RADEON_PPLL_REF_DIV); | ||
| 112 | if (ASIC_IS_R300(rdev)) | ||
| 113 | p1pll->reference_div = | ||
| 114 | (tmp & R300_PPLL_REF_DIV_ACC_MASK) >> R300_PPLL_REF_DIV_ACC_SHIFT; | ||
| 115 | else | ||
| 116 | p1pll->reference_div = tmp & RADEON_PPLL_REF_DIV_MASK; | ||
| 117 | if (p1pll->reference_div < 2) | ||
| 118 | p1pll->reference_div = 12; | ||
| 119 | } else | ||
| 120 | p1pll->reference_div = 12; | ||
| 121 | } | ||
| 103 | if (p2pll->reference_div < 2) | 122 | if (p2pll->reference_div < 2) |
| 104 | p2pll->reference_div = 12; | 123 | p2pll->reference_div = 12; |
| 105 | if (rdev->family < CHIP_RS600) { | 124 | if (rdev->family < CHIP_RS600) { |
diff --git a/drivers/gpu/drm/radeon/radeon_combios.c b/drivers/gpu/drm/radeon/radeon_combios.c index 5253cbf6db1f..14d3555e4afe 100644 --- a/drivers/gpu/drm/radeon/radeon_combios.c +++ b/drivers/gpu/drm/radeon/radeon_combios.c | |||
| @@ -450,29 +450,29 @@ struct radeon_i2c_bus_rec combios_setup_i2c_bus(int ddc_line) | |||
| 450 | i2c.mask_data_mask = RADEON_GPIO_EN_0; | 450 | i2c.mask_data_mask = RADEON_GPIO_EN_0; |
| 451 | i2c.a_clk_mask = RADEON_GPIO_A_1; | 451 | i2c.a_clk_mask = RADEON_GPIO_A_1; |
| 452 | i2c.a_data_mask = RADEON_GPIO_A_0; | 452 | i2c.a_data_mask = RADEON_GPIO_A_0; |
| 453 | i2c.put_clk_mask = RADEON_GPIO_EN_1; | 453 | i2c.en_clk_mask = RADEON_GPIO_EN_1; |
| 454 | i2c.put_data_mask = RADEON_GPIO_EN_0; | 454 | i2c.en_data_mask = RADEON_GPIO_EN_0; |
| 455 | i2c.get_clk_mask = RADEON_GPIO_Y_1; | 455 | i2c.y_clk_mask = RADEON_GPIO_Y_1; |
| 456 | i2c.get_data_mask = RADEON_GPIO_Y_0; | 456 | i2c.y_data_mask = RADEON_GPIO_Y_0; |
| 457 | if ((ddc_line == RADEON_LCD_GPIO_MASK) || | 457 | if ((ddc_line == RADEON_LCD_GPIO_MASK) || |
| 458 | (ddc_line == RADEON_MDGPIO_EN_REG)) { | 458 | (ddc_line == RADEON_MDGPIO_EN_REG)) { |
| 459 | i2c.mask_clk_reg = ddc_line; | 459 | i2c.mask_clk_reg = ddc_line; |
| 460 | i2c.mask_data_reg = ddc_line; | 460 | i2c.mask_data_reg = ddc_line; |
| 461 | i2c.a_clk_reg = ddc_line; | 461 | i2c.a_clk_reg = ddc_line; |
| 462 | i2c.a_data_reg = ddc_line; | 462 | i2c.a_data_reg = ddc_line; |
| 463 | i2c.put_clk_reg = ddc_line; | 463 | i2c.en_clk_reg = ddc_line; |
| 464 | i2c.put_data_reg = ddc_line; | 464 | i2c.en_data_reg = ddc_line; |
| 465 | i2c.get_clk_reg = ddc_line + 4; | 465 | i2c.y_clk_reg = ddc_line + 4; |
| 466 | i2c.get_data_reg = ddc_line + 4; | 466 | i2c.y_data_reg = ddc_line + 4; |
| 467 | } else { | 467 | } else { |
| 468 | i2c.mask_clk_reg = ddc_line; | 468 | i2c.mask_clk_reg = ddc_line; |
| 469 | i2c.mask_data_reg = ddc_line; | 469 | i2c.mask_data_reg = ddc_line; |
| 470 | i2c.a_clk_reg = ddc_line; | 470 | i2c.a_clk_reg = ddc_line; |
| 471 | i2c.a_data_reg = ddc_line; | 471 | i2c.a_data_reg = ddc_line; |
| 472 | i2c.put_clk_reg = ddc_line; | 472 | i2c.en_clk_reg = ddc_line; |
| 473 | i2c.put_data_reg = ddc_line; | 473 | i2c.en_data_reg = ddc_line; |
| 474 | i2c.get_clk_reg = ddc_line; | 474 | i2c.y_clk_reg = ddc_line; |
| 475 | i2c.get_data_reg = ddc_line; | 475 | i2c.y_data_reg = ddc_line; |
| 476 | } | 476 | } |
| 477 | 477 | ||
| 478 | if (ddc_line) | 478 | if (ddc_line) |
| @@ -495,7 +495,7 @@ bool radeon_combios_get_clock_info(struct drm_device *dev) | |||
| 495 | uint16_t sclk, mclk; | 495 | uint16_t sclk, mclk; |
| 496 | 496 | ||
| 497 | if (rdev->bios == NULL) | 497 | if (rdev->bios == NULL) |
| 498 | return NULL; | 498 | return false; |
| 499 | 499 | ||
| 500 | pll_info = combios_get_table_offset(dev, COMBIOS_PLL_INFO_TABLE); | 500 | pll_info = combios_get_table_offset(dev, COMBIOS_PLL_INFO_TABLE); |
| 501 | if (pll_info) { | 501 | if (pll_info) { |
| @@ -993,8 +993,8 @@ static const struct radeon_tmds_pll default_tmds_pll[CHIP_LAST][4] = { | |||
| 993 | {{0xffffffff, 0xb01cb}, {0, 0}, {0, 0}, {0, 0}}, /* CHIP_R420 */ | 993 | {{0xffffffff, 0xb01cb}, {0, 0}, {0, 0}, {0, 0}}, /* CHIP_R420 */ |
| 994 | {{0xffffffff, 0xb01cb}, {0, 0}, {0, 0}, {0, 0}}, /* CHIP_R423 */ | 994 | {{0xffffffff, 0xb01cb}, {0, 0}, {0, 0}, {0, 0}}, /* CHIP_R423 */ |
| 995 | {{0xffffffff, 0xb01cb}, {0, 0}, {0, 0}, {0, 0}}, /* CHIP_RV410 */ | 995 | {{0xffffffff, 0xb01cb}, {0, 0}, {0, 0}, {0, 0}}, /* CHIP_RV410 */ |
| 996 | {{15000, 0xb0155}, {0xffffffff, 0xb01cb}, {0, 0}, {0, 0}}, /* CHIP_RS400 */ | 996 | { {0, 0}, {0, 0}, {0, 0}, {0, 0} }, /* CHIP_RS400 */ |
| 997 | {{15000, 0xb0155}, {0xffffffff, 0xb01cb}, {0, 0}, {0, 0}}, /* CHIP_RS480 */ | 997 | { {0, 0}, {0, 0}, {0, 0}, {0, 0} }, /* CHIP_RS480 */ |
| 998 | }; | 998 | }; |
| 999 | 999 | ||
| 1000 | bool radeon_legacy_get_tmds_info_from_table(struct radeon_encoder *encoder, | 1000 | bool radeon_legacy_get_tmds_info_from_table(struct radeon_encoder *encoder, |
| @@ -1028,7 +1028,6 @@ bool radeon_legacy_get_tmds_info_from_combios(struct radeon_encoder *encoder, | |||
| 1028 | tmds_info = combios_get_table_offset(dev, COMBIOS_DFP_INFO_TABLE); | 1028 | tmds_info = combios_get_table_offset(dev, COMBIOS_DFP_INFO_TABLE); |
| 1029 | 1029 | ||
| 1030 | if (tmds_info) { | 1030 | if (tmds_info) { |
| 1031 | |||
| 1032 | ver = RBIOS8(tmds_info); | 1031 | ver = RBIOS8(tmds_info); |
| 1033 | DRM_INFO("DFP table revision: %d\n", ver); | 1032 | DRM_INFO("DFP table revision: %d\n", ver); |
| 1034 | if (ver == 3) { | 1033 | if (ver == 3) { |
| @@ -1063,45 +1062,132 @@ bool radeon_legacy_get_tmds_info_from_combios(struct radeon_encoder *encoder, | |||
| 1063 | tmds->tmds_pll[i].value); | 1062 | tmds->tmds_pll[i].value); |
| 1064 | } | 1063 | } |
| 1065 | } | 1064 | } |
| 1066 | } else | 1065 | } else { |
| 1067 | DRM_INFO("No TMDS info found in BIOS\n"); | 1066 | DRM_INFO("No TMDS info found in BIOS\n"); |
| 1067 | return false; | ||
| 1068 | } | ||
| 1068 | return true; | 1069 | return true; |
| 1069 | } | 1070 | } |
| 1070 | 1071 | ||
| 1071 | struct radeon_encoder_int_tmds *radeon_combios_get_tmds_info(struct radeon_encoder *encoder) | 1072 | bool radeon_legacy_get_ext_tmds_info_from_table(struct radeon_encoder *encoder, |
| 1073 | struct radeon_encoder_ext_tmds *tmds) | ||
| 1072 | { | 1074 | { |
| 1073 | struct radeon_encoder_int_tmds *tmds = NULL; | 1075 | struct drm_device *dev = encoder->base.dev; |
| 1074 | bool ret; | 1076 | struct radeon_device *rdev = dev->dev_private; |
| 1075 | 1077 | struct radeon_i2c_bus_rec i2c_bus; | |
| 1076 | tmds = kzalloc(sizeof(struct radeon_encoder_int_tmds), GFP_KERNEL); | ||
| 1077 | 1078 | ||
| 1078 | if (!tmds) | 1079 | /* default for macs */ |
| 1079 | return NULL; | 1080 | i2c_bus = combios_setup_i2c_bus(RADEON_GPIO_MONID); |
| 1081 | tmds->i2c_bus = radeon_i2c_create(dev, &i2c_bus, "DVO"); | ||
| 1080 | 1082 | ||
| 1081 | ret = radeon_legacy_get_tmds_info_from_combios(encoder, tmds); | 1083 | /* XXX some macs have duallink chips */ |
| 1082 | if (ret == false) | 1084 | switch (rdev->mode_info.connector_table) { |
| 1083 | radeon_legacy_get_tmds_info_from_table(encoder, tmds); | 1085 | case CT_POWERBOOK_EXTERNAL: |
| 1086 | case CT_MINI_EXTERNAL: | ||
| 1087 | default: | ||
| 1088 | tmds->dvo_chip = DVO_SIL164; | ||
| 1089 | tmds->slave_addr = 0x70 >> 1; /* 7 bit addressing */ | ||
| 1090 | break; | ||
| 1091 | } | ||
| 1084 | 1092 | ||
| 1085 | return tmds; | 1093 | return true; |
| 1086 | } | 1094 | } |
| 1087 | 1095 | ||
| 1088 | void radeon_combios_get_ext_tmds_info(struct radeon_encoder *encoder) | 1096 | bool radeon_legacy_get_ext_tmds_info_from_combios(struct radeon_encoder *encoder, |
| 1097 | struct radeon_encoder_ext_tmds *tmds) | ||
| 1089 | { | 1098 | { |
| 1090 | struct drm_device *dev = encoder->base.dev; | 1099 | struct drm_device *dev = encoder->base.dev; |
| 1091 | struct radeon_device *rdev = dev->dev_private; | 1100 | struct radeon_device *rdev = dev->dev_private; |
| 1092 | uint16_t ext_tmds_info; | 1101 | uint16_t offset; |
| 1093 | uint8_t ver; | 1102 | uint8_t ver, id, blocks, clk, data; |
| 1103 | int i; | ||
| 1104 | enum radeon_combios_ddc gpio; | ||
| 1105 | struct radeon_i2c_bus_rec i2c_bus; | ||
| 1094 | 1106 | ||
| 1095 | if (rdev->bios == NULL) | 1107 | if (rdev->bios == NULL) |
| 1096 | return; | 1108 | return false; |
| 1097 | 1109 | ||
| 1098 | ext_tmds_info = | 1110 | tmds->i2c_bus = NULL; |
| 1099 | combios_get_table_offset(dev, COMBIOS_EXT_TMDS_INFO_TABLE); | 1111 | if (rdev->flags & RADEON_IS_IGP) { |
| 1100 | if (ext_tmds_info) { | 1112 | offset = combios_get_table_offset(dev, COMBIOS_I2C_INFO_TABLE); |
| 1101 | ver = RBIOS8(ext_tmds_info); | 1113 | if (offset) { |
| 1102 | DRM_INFO("External TMDS Table revision: %d\n", ver); | 1114 | ver = RBIOS8(offset); |
| 1103 | // TODO | 1115 | DRM_INFO("GPIO Table revision: %d\n", ver); |
| 1116 | blocks = RBIOS8(offset + 2); | ||
| 1117 | for (i = 0; i < blocks; i++) { | ||
| 1118 | id = RBIOS8(offset + 3 + (i * 5) + 0); | ||
| 1119 | if (id == 136) { | ||
| 1120 | clk = RBIOS8(offset + 3 + (i * 5) + 3); | ||
| 1121 | data = RBIOS8(offset + 3 + (i * 5) + 4); | ||
| 1122 | i2c_bus.valid = true; | ||
| 1123 | i2c_bus.mask_clk_mask = (1 << clk); | ||
| 1124 | i2c_bus.mask_data_mask = (1 << data); | ||
| 1125 | i2c_bus.a_clk_mask = (1 << clk); | ||
| 1126 | i2c_bus.a_data_mask = (1 << data); | ||
| 1127 | i2c_bus.en_clk_mask = (1 << clk); | ||
| 1128 | i2c_bus.en_data_mask = (1 << data); | ||
| 1129 | i2c_bus.y_clk_mask = (1 << clk); | ||
| 1130 | i2c_bus.y_data_mask = (1 << data); | ||
| 1131 | i2c_bus.mask_clk_reg = RADEON_GPIOPAD_MASK; | ||
| 1132 | i2c_bus.mask_data_reg = RADEON_GPIOPAD_MASK; | ||
| 1133 | i2c_bus.a_clk_reg = RADEON_GPIOPAD_A; | ||
| 1134 | i2c_bus.a_data_reg = RADEON_GPIOPAD_A; | ||
| 1135 | i2c_bus.en_clk_reg = RADEON_GPIOPAD_EN; | ||
| 1136 | i2c_bus.en_data_reg = RADEON_GPIOPAD_EN; | ||
| 1137 | i2c_bus.y_clk_reg = RADEON_GPIOPAD_Y; | ||
| 1138 | i2c_bus.y_data_reg = RADEON_GPIOPAD_Y; | ||
| 1139 | tmds->i2c_bus = radeon_i2c_create(dev, &i2c_bus, "DVO"); | ||
| 1140 | tmds->dvo_chip = DVO_SIL164; | ||
| 1141 | tmds->slave_addr = 0x70 >> 1; /* 7 bit addressing */ | ||
| 1142 | break; | ||
| 1143 | } | ||
| 1144 | } | ||
| 1145 | } | ||
| 1146 | } else { | ||
| 1147 | offset = combios_get_table_offset(dev, COMBIOS_EXT_TMDS_INFO_TABLE); | ||
| 1148 | if (offset) { | ||
| 1149 | ver = RBIOS8(offset); | ||
| 1150 | DRM_INFO("External TMDS Table revision: %d\n", ver); | ||
| 1151 | tmds->slave_addr = RBIOS8(offset + 4 + 2); | ||
| 1152 | tmds->slave_addr >>= 1; /* 7 bit addressing */ | ||
| 1153 | gpio = RBIOS8(offset + 4 + 3); | ||
| 1154 | switch (gpio) { | ||
| 1155 | case DDC_MONID: | ||
| 1156 | i2c_bus = combios_setup_i2c_bus(RADEON_GPIO_MONID); | ||
| 1157 | tmds->i2c_bus = radeon_i2c_create(dev, &i2c_bus, "DVO"); | ||
| 1158 | break; | ||
| 1159 | case DDC_DVI: | ||
| 1160 | i2c_bus = combios_setup_i2c_bus(RADEON_GPIO_DVI_DDC); | ||
| 1161 | tmds->i2c_bus = radeon_i2c_create(dev, &i2c_bus, "DVO"); | ||
| 1162 | break; | ||
| 1163 | case DDC_VGA: | ||
| 1164 | i2c_bus = combios_setup_i2c_bus(RADEON_GPIO_VGA_DDC); | ||
| 1165 | tmds->i2c_bus = radeon_i2c_create(dev, &i2c_bus, "DVO"); | ||
| 1166 | break; | ||
| 1167 | case DDC_CRT2: | ||
| 1168 | /* R3xx+ chips don't have GPIO_CRT2_DDC gpio pad */ | ||
| 1169 | if (rdev->family >= CHIP_R300) | ||
| 1170 | i2c_bus = combios_setup_i2c_bus(RADEON_GPIO_MONID); | ||
| 1171 | else | ||
| 1172 | i2c_bus = combios_setup_i2c_bus(RADEON_GPIO_CRT2_DDC); | ||
| 1173 | tmds->i2c_bus = radeon_i2c_create(dev, &i2c_bus, "DVO"); | ||
| 1174 | break; | ||
| 1175 | case DDC_LCD: /* MM i2c */ | ||
| 1176 | DRM_ERROR("MM i2c requires hw i2c engine\n"); | ||
| 1177 | break; | ||
| 1178 | default: | ||
| 1179 | DRM_ERROR("Unsupported gpio %d\n", gpio); | ||
| 1180 | break; | ||
| 1181 | } | ||
| 1182 | } | ||
| 1104 | } | 1183 | } |
| 1184 | |||
| 1185 | if (!tmds->i2c_bus) { | ||
| 1186 | DRM_INFO("No valid Ext TMDS info found in BIOS\n"); | ||
| 1187 | return false; | ||
| 1188 | } | ||
| 1189 | |||
| 1190 | return true; | ||
| 1105 | } | 1191 | } |
| 1106 | 1192 | ||
| 1107 | bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev) | 1193 | bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev) |
| @@ -1567,20 +1653,25 @@ static bool radeon_apply_legacy_quirks(struct drm_device *dev, | |||
| 1567 | ddc_i2c->mask_data_mask = 0x80; | 1653 | ddc_i2c->mask_data_mask = 0x80; |
| 1568 | ddc_i2c->a_clk_mask = (0x20 << 8); | 1654 | ddc_i2c->a_clk_mask = (0x20 << 8); |
| 1569 | ddc_i2c->a_data_mask = 0x80; | 1655 | ddc_i2c->a_data_mask = 0x80; |
| 1570 | ddc_i2c->put_clk_mask = (0x20 << 8); | 1656 | ddc_i2c->en_clk_mask = (0x20 << 8); |
| 1571 | ddc_i2c->put_data_mask = 0x80; | 1657 | ddc_i2c->en_data_mask = 0x80; |
| 1572 | ddc_i2c->get_clk_mask = (0x20 << 8); | 1658 | ddc_i2c->y_clk_mask = (0x20 << 8); |
| 1573 | ddc_i2c->get_data_mask = 0x80; | 1659 | ddc_i2c->y_data_mask = 0x80; |
| 1574 | ddc_i2c->mask_clk_reg = RADEON_GPIOPAD_MASK; | 1660 | ddc_i2c->mask_clk_reg = RADEON_GPIOPAD_MASK; |
| 1575 | ddc_i2c->mask_data_reg = RADEON_GPIOPAD_MASK; | 1661 | ddc_i2c->mask_data_reg = RADEON_GPIOPAD_MASK; |
| 1576 | ddc_i2c->a_clk_reg = RADEON_GPIOPAD_A; | 1662 | ddc_i2c->a_clk_reg = RADEON_GPIOPAD_A; |
| 1577 | ddc_i2c->a_data_reg = RADEON_GPIOPAD_A; | 1663 | ddc_i2c->a_data_reg = RADEON_GPIOPAD_A; |
| 1578 | ddc_i2c->put_clk_reg = RADEON_GPIOPAD_EN; | 1664 | ddc_i2c->en_clk_reg = RADEON_GPIOPAD_EN; |
| 1579 | ddc_i2c->put_data_reg = RADEON_GPIOPAD_EN; | 1665 | ddc_i2c->en_data_reg = RADEON_GPIOPAD_EN; |
| 1580 | ddc_i2c->get_clk_reg = RADEON_LCD_GPIO_Y_REG; | 1666 | ddc_i2c->y_clk_reg = RADEON_GPIOPAD_Y; |
| 1581 | ddc_i2c->get_data_reg = RADEON_LCD_GPIO_Y_REG; | 1667 | ddc_i2c->y_data_reg = RADEON_GPIOPAD_Y; |
| 1582 | } | 1668 | } |
| 1583 | 1669 | ||
| 1670 | /* R3xx+ chips don't have GPIO_CRT2_DDC gpio pad */ | ||
| 1671 | if ((rdev->family >= CHIP_R300) && | ||
| 1672 | ddc_i2c->mask_clk_reg == RADEON_GPIO_CRT2_DDC) | ||
| 1673 | *ddc_i2c = combios_setup_i2c_bus(RADEON_GPIO_DVI_DDC); | ||
| 1674 | |||
| 1584 | /* Certain IBM chipset RN50s have a BIOS reporting two VGAs, | 1675 | /* Certain IBM chipset RN50s have a BIOS reporting two VGAs, |
| 1585 | one with VGA DDC and one with CRT2 DDC. - kill the CRT2 DDC one */ | 1676 | one with VGA DDC and one with CRT2 DDC. - kill the CRT2 DDC one */ |
| 1586 | if (dev->pdev->device == 0x515e && | 1677 | if (dev->pdev->device == 0x515e && |
| @@ -1624,6 +1715,12 @@ static bool radeon_apply_legacy_tv_quirks(struct drm_device *dev) | |||
| 1624 | dev->pdev->subsystem_device == 0x280a) | 1715 | dev->pdev->subsystem_device == 0x280a) |
| 1625 | return false; | 1716 | return false; |
| 1626 | 1717 | ||
| 1718 | /* MSI S270 has non-existent TV port */ | ||
| 1719 | if (dev->pdev->device == 0x5955 && | ||
| 1720 | dev->pdev->subsystem_vendor == 0x1462 && | ||
| 1721 | dev->pdev->subsystem_device == 0x0131) | ||
| 1722 | return false; | ||
| 1723 | |||
| 1627 | return true; | 1724 | return true; |
| 1628 | } | 1725 | } |
| 1629 | 1726 | ||
| @@ -1939,13 +2036,13 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev) | |||
| 1939 | RBIOS32(lcd_ddc_info + 3); | 2036 | RBIOS32(lcd_ddc_info + 3); |
| 1940 | ddc_i2c.a_data_mask = | 2037 | ddc_i2c.a_data_mask = |
| 1941 | RBIOS32(lcd_ddc_info + 7); | 2038 | RBIOS32(lcd_ddc_info + 7); |
| 1942 | ddc_i2c.put_clk_mask = | 2039 | ddc_i2c.en_clk_mask = |
| 1943 | RBIOS32(lcd_ddc_info + 3); | 2040 | RBIOS32(lcd_ddc_info + 3); |
| 1944 | ddc_i2c.put_data_mask = | 2041 | ddc_i2c.en_data_mask = |
| 1945 | RBIOS32(lcd_ddc_info + 7); | 2042 | RBIOS32(lcd_ddc_info + 7); |
| 1946 | ddc_i2c.get_clk_mask = | 2043 | ddc_i2c.y_clk_mask = |
| 1947 | RBIOS32(lcd_ddc_info + 3); | 2044 | RBIOS32(lcd_ddc_info + 3); |
| 1948 | ddc_i2c.get_data_mask = | 2045 | ddc_i2c.y_data_mask = |
| 1949 | RBIOS32(lcd_ddc_info + 7); | 2046 | RBIOS32(lcd_ddc_info + 7); |
| 1950 | break; | 2047 | break; |
| 1951 | case DDC_GPIO: | 2048 | case DDC_GPIO: |
| @@ -1960,13 +2057,13 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev) | |||
| 1960 | RBIOS32(lcd_ddc_info + 3); | 2057 | RBIOS32(lcd_ddc_info + 3); |
| 1961 | ddc_i2c.a_data_mask = | 2058 | ddc_i2c.a_data_mask = |
| 1962 | RBIOS32(lcd_ddc_info + 7); | 2059 | RBIOS32(lcd_ddc_info + 7); |
| 1963 | ddc_i2c.put_clk_mask = | 2060 | ddc_i2c.en_clk_mask = |
| 1964 | RBIOS32(lcd_ddc_info + 3); | 2061 | RBIOS32(lcd_ddc_info + 3); |
| 1965 | ddc_i2c.put_data_mask = | 2062 | ddc_i2c.en_data_mask = |
| 1966 | RBIOS32(lcd_ddc_info + 7); | 2063 | RBIOS32(lcd_ddc_info + 7); |
| 1967 | ddc_i2c.get_clk_mask = | 2064 | ddc_i2c.y_clk_mask = |
| 1968 | RBIOS32(lcd_ddc_info + 3); | 2065 | RBIOS32(lcd_ddc_info + 3); |
| 1969 | ddc_i2c.get_data_mask = | 2066 | ddc_i2c.y_data_mask = |
| 1970 | RBIOS32(lcd_ddc_info + 7); | 2067 | RBIOS32(lcd_ddc_info + 7); |
| 1971 | break; | 2068 | break; |
| 1972 | default: | 2069 | default: |
| @@ -2014,6 +2111,193 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev) | |||
| 2014 | return true; | 2111 | return true; |
| 2015 | } | 2112 | } |
| 2016 | 2113 | ||
| 2114 | void radeon_external_tmds_setup(struct drm_encoder *encoder) | ||
| 2115 | { | ||
| 2116 | struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); | ||
| 2117 | struct radeon_encoder_ext_tmds *tmds = radeon_encoder->enc_priv; | ||
| 2118 | |||
| 2119 | if (!tmds) | ||
| 2120 | return; | ||
| 2121 | |||
| 2122 | switch (tmds->dvo_chip) { | ||
| 2123 | case DVO_SIL164: | ||
| 2124 | /* sil 164 */ | ||
| 2125 | radeon_i2c_do_lock(tmds->i2c_bus, 1); | ||
| 2126 | radeon_i2c_sw_put_byte(tmds->i2c_bus, | ||
| 2127 | tmds->slave_addr, | ||
| 2128 | 0x08, 0x30); | ||
| 2129 | radeon_i2c_sw_put_byte(tmds->i2c_bus, | ||
| 2130 | tmds->slave_addr, | ||
| 2131 | 0x09, 0x00); | ||
| 2132 | radeon_i2c_sw_put_byte(tmds->i2c_bus, | ||
| 2133 | tmds->slave_addr, | ||
| 2134 | 0x0a, 0x90); | ||
| 2135 | radeon_i2c_sw_put_byte(tmds->i2c_bus, | ||
| 2136 | tmds->slave_addr, | ||
| 2137 | 0x0c, 0x89); | ||
| 2138 | radeon_i2c_sw_put_byte(tmds->i2c_bus, | ||
| 2139 | tmds->slave_addr, | ||
| 2140 | 0x08, 0x3b); | ||
| 2141 | radeon_i2c_do_lock(tmds->i2c_bus, 0); | ||
| 2142 | break; | ||
| 2143 | case DVO_SIL1178: | ||
| 2144 | /* sil 1178 - untested */ | ||
| 2145 | /* | ||
| 2146 | * 0x0f, 0x44 | ||
| 2147 | * 0x0f, 0x4c | ||
| 2148 | * 0x0e, 0x01 | ||
| 2149 | * 0x0a, 0x80 | ||
| 2150 | * 0x09, 0x30 | ||
| 2151 | * 0x0c, 0xc9 | ||
| 2152 | * 0x0d, 0x70 | ||
| 2153 | * 0x08, 0x32 | ||
| 2154 | * 0x08, 0x33 | ||
| 2155 | */ | ||
| 2156 | break; | ||
| 2157 | default: | ||
| 2158 | break; | ||
| 2159 | } | ||
| 2160 | |||
| 2161 | } | ||
| 2162 | |||
| 2163 | bool radeon_combios_external_tmds_setup(struct drm_encoder *encoder) | ||
| 2164 | { | ||
| 2165 | struct drm_device *dev = encoder->dev; | ||
| 2166 | struct radeon_device *rdev = dev->dev_private; | ||
| 2167 | struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); | ||
| 2168 | uint16_t offset; | ||
| 2169 | uint8_t blocks, slave_addr, rev; | ||
| 2170 | uint32_t index, id; | ||
| 2171 | uint32_t reg, val, and_mask, or_mask; | ||
| 2172 | struct radeon_encoder_ext_tmds *tmds = radeon_encoder->enc_priv; | ||
| 2173 | |||
| 2174 | if (rdev->bios == NULL) | ||
| 2175 | return false; | ||
| 2176 | |||
| 2177 | if (!tmds) | ||
| 2178 | return false; | ||
| 2179 | |||
| 2180 | if (rdev->flags & RADEON_IS_IGP) { | ||
| 2181 | offset = combios_get_table_offset(dev, COMBIOS_TMDS_POWER_ON_TABLE); | ||
| 2182 | rev = RBIOS8(offset); | ||
| 2183 | if (offset) { | ||
| 2184 | rev = RBIOS8(offset); | ||
| 2185 | if (rev > 1) { | ||
| 2186 | blocks = RBIOS8(offset + 3); | ||
| 2187 | index = offset + 4; | ||
| 2188 | while (blocks > 0) { | ||
| 2189 | id = RBIOS16(index); | ||
| 2190 | index += 2; | ||
| 2191 | switch (id >> 13) { | ||
| 2192 | case 0: | ||
| 2193 | reg = (id & 0x1fff) * 4; | ||
| 2194 | val = RBIOS32(index); | ||
| 2195 | index += 4; | ||
| 2196 | WREG32(reg, val); | ||
| 2197 | break; | ||
| 2198 | case 2: | ||
| 2199 | reg = (id & 0x1fff) * 4; | ||
| 2200 | and_mask = RBIOS32(index); | ||
| 2201 | index += 4; | ||
| 2202 | or_mask = RBIOS32(index); | ||
| 2203 | index += 4; | ||
| 2204 | val = RREG32(reg); | ||
| 2205 | val = (val & and_mask) | or_mask; | ||
| 2206 | WREG32(reg, val); | ||
| 2207 | break; | ||
| 2208 | case 3: | ||
| 2209 | val = RBIOS16(index); | ||
| 2210 | index += 2; | ||
| 2211 | udelay(val); | ||
| 2212 | break; | ||
| 2213 | case 4: | ||
| 2214 | val = RBIOS16(index); | ||
| 2215 | index += 2; | ||
| 2216 | udelay(val * 1000); | ||
| 2217 | break; | ||
| 2218 | case 6: | ||
| 2219 | slave_addr = id & 0xff; | ||
| 2220 | slave_addr >>= 1; /* 7 bit addressing */ | ||
| 2221 | index++; | ||
| 2222 | reg = RBIOS8(index); | ||
| 2223 | index++; | ||
| 2224 | val = RBIOS8(index); | ||
| 2225 | index++; | ||
| 2226 | radeon_i2c_do_lock(tmds->i2c_bus, 1); | ||
| 2227 | radeon_i2c_sw_put_byte(tmds->i2c_bus, | ||
| 2228 | slave_addr, | ||
| 2229 | reg, val); | ||
| 2230 | radeon_i2c_do_lock(tmds->i2c_bus, 0); | ||
| 2231 | break; | ||
| 2232 | default: | ||
| 2233 | DRM_ERROR("Unknown id %d\n", id >> 13); | ||
| 2234 | break; | ||
| 2235 | } | ||
| 2236 | blocks--; | ||
| 2237 | } | ||
| 2238 | return true; | ||
| 2239 | } | ||
| 2240 | } | ||
| 2241 | } else { | ||
| 2242 | offset = combios_get_table_offset(dev, COMBIOS_EXT_TMDS_INFO_TABLE); | ||
| 2243 | if (offset) { | ||
| 2244 | index = offset + 10; | ||
| 2245 | id = RBIOS16(index); | ||
| 2246 | while (id != 0xffff) { | ||
| 2247 | index += 2; | ||
| 2248 | switch (id >> 13) { | ||
| 2249 | case 0: | ||
| 2250 | reg = (id & 0x1fff) * 4; | ||
| 2251 | val = RBIOS32(index); | ||
| 2252 | WREG32(reg, val); | ||
| 2253 | break; | ||
| 2254 | case 2: | ||
| 2255 | reg = (id & 0x1fff) * 4; | ||
| 2256 | and_mask = RBIOS32(index); | ||
| 2257 | index += 4; | ||
| 2258 | or_mask = RBIOS32(index); | ||
| 2259 | index += 4; | ||
| 2260 | val = RREG32(reg); | ||
| 2261 | val = (val & and_mask) | or_mask; | ||
| 2262 | WREG32(reg, val); | ||
| 2263 | break; | ||
| 2264 | case 4: | ||
| 2265 | val = RBIOS16(index); | ||
| 2266 | index += 2; | ||
| 2267 | udelay(val); | ||
| 2268 | break; | ||
| 2269 | case 5: | ||
| 2270 | reg = id & 0x1fff; | ||
| 2271 | and_mask = RBIOS32(index); | ||
| 2272 | index += 4; | ||
| 2273 | or_mask = RBIOS32(index); | ||
| 2274 | index += 4; | ||
| 2275 | val = RREG32_PLL(reg); | ||
| 2276 | val = (val & and_mask) | or_mask; | ||
| 2277 | WREG32_PLL(reg, val); | ||
| 2278 | break; | ||
| 2279 | case 6: | ||
| 2280 | reg = id & 0x1fff; | ||
| 2281 | val = RBIOS8(index); | ||
| 2282 | index += 1; | ||
| 2283 | radeon_i2c_do_lock(tmds->i2c_bus, 1); | ||
| 2284 | radeon_i2c_sw_put_byte(tmds->i2c_bus, | ||
| 2285 | tmds->slave_addr, | ||
| 2286 | reg, val); | ||
| 2287 | radeon_i2c_do_lock(tmds->i2c_bus, 0); | ||
| 2288 | break; | ||
| 2289 | default: | ||
| 2290 | DRM_ERROR("Unknown id %d\n", id >> 13); | ||
| 2291 | break; | ||
| 2292 | } | ||
| 2293 | id = RBIOS16(index); | ||
| 2294 | } | ||
| 2295 | return true; | ||
| 2296 | } | ||
| 2297 | } | ||
| 2298 | return false; | ||
| 2299 | } | ||
| 2300 | |||
| 2017 | static void combios_parse_mmio_table(struct drm_device *dev, uint16_t offset) | 2301 | static void combios_parse_mmio_table(struct drm_device *dev, uint16_t offset) |
| 2018 | { | 2302 | { |
| 2019 | struct radeon_device *rdev = dev->dev_private; | 2303 | struct radeon_device *rdev = dev->dev_private; |
diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c index 29763ceae3af..7ab3c501b4dd 100644 --- a/drivers/gpu/drm/radeon/radeon_connectors.c +++ b/drivers/gpu/drm/radeon/radeon_connectors.c | |||
| @@ -445,10 +445,10 @@ static enum drm_connector_status radeon_lvds_detect(struct drm_connector *connec | |||
| 445 | ret = connector_status_connected; | 445 | ret = connector_status_connected; |
| 446 | else { | 446 | else { |
| 447 | if (radeon_connector->ddc_bus) { | 447 | if (radeon_connector->ddc_bus) { |
| 448 | radeon_i2c_do_lock(radeon_connector, 1); | 448 | radeon_i2c_do_lock(radeon_connector->ddc_bus, 1); |
| 449 | radeon_connector->edid = drm_get_edid(&radeon_connector->base, | 449 | radeon_connector->edid = drm_get_edid(&radeon_connector->base, |
| 450 | &radeon_connector->ddc_bus->adapter); | 450 | &radeon_connector->ddc_bus->adapter); |
| 451 | radeon_i2c_do_lock(radeon_connector, 0); | 451 | radeon_i2c_do_lock(radeon_connector->ddc_bus, 0); |
| 452 | if (radeon_connector->edid) | 452 | if (radeon_connector->edid) |
| 453 | ret = connector_status_connected; | 453 | ret = connector_status_connected; |
| 454 | } | 454 | } |
| @@ -553,17 +553,17 @@ static enum drm_connector_status radeon_vga_detect(struct drm_connector *connect | |||
| 553 | if (!encoder) | 553 | if (!encoder) |
| 554 | ret = connector_status_disconnected; | 554 | ret = connector_status_disconnected; |
| 555 | 555 | ||
| 556 | radeon_i2c_do_lock(radeon_connector, 1); | 556 | radeon_i2c_do_lock(radeon_connector->ddc_bus, 1); |
| 557 | dret = radeon_ddc_probe(radeon_connector); | 557 | dret = radeon_ddc_probe(radeon_connector); |
| 558 | radeon_i2c_do_lock(radeon_connector, 0); | 558 | radeon_i2c_do_lock(radeon_connector->ddc_bus, 0); |
| 559 | if (dret) { | 559 | if (dret) { |
| 560 | if (radeon_connector->edid) { | 560 | if (radeon_connector->edid) { |
| 561 | kfree(radeon_connector->edid); | 561 | kfree(radeon_connector->edid); |
| 562 | radeon_connector->edid = NULL; | 562 | radeon_connector->edid = NULL; |
| 563 | } | 563 | } |
| 564 | radeon_i2c_do_lock(radeon_connector, 1); | 564 | radeon_i2c_do_lock(radeon_connector->ddc_bus, 1); |
| 565 | radeon_connector->edid = drm_get_edid(&radeon_connector->base, &radeon_connector->ddc_bus->adapter); | 565 | radeon_connector->edid = drm_get_edid(&radeon_connector->base, &radeon_connector->ddc_bus->adapter); |
| 566 | radeon_i2c_do_lock(radeon_connector, 0); | 566 | radeon_i2c_do_lock(radeon_connector->ddc_bus, 0); |
| 567 | 567 | ||
| 568 | if (!radeon_connector->edid) { | 568 | if (!radeon_connector->edid) { |
| 569 | DRM_ERROR("%s: probed a monitor but no|invalid EDID\n", | 569 | DRM_ERROR("%s: probed a monitor but no|invalid EDID\n", |
| @@ -708,17 +708,17 @@ static enum drm_connector_status radeon_dvi_detect(struct drm_connector *connect | |||
| 708 | enum drm_connector_status ret = connector_status_disconnected; | 708 | enum drm_connector_status ret = connector_status_disconnected; |
| 709 | bool dret; | 709 | bool dret; |
| 710 | 710 | ||
| 711 | radeon_i2c_do_lock(radeon_connector, 1); | 711 | radeon_i2c_do_lock(radeon_connector->ddc_bus, 1); |
| 712 | dret = radeon_ddc_probe(radeon_connector); | 712 | dret = radeon_ddc_probe(radeon_connector); |
| 713 | radeon_i2c_do_lock(radeon_connector, 0); | 713 | radeon_i2c_do_lock(radeon_connector->ddc_bus, 0); |
| 714 | if (dret) { | 714 | if (dret) { |
| 715 | if (radeon_connector->edid) { | 715 | if (radeon_connector->edid) { |
| 716 | kfree(radeon_connector->edid); | 716 | kfree(radeon_connector->edid); |
| 717 | radeon_connector->edid = NULL; | 717 | radeon_connector->edid = NULL; |
| 718 | } | 718 | } |
| 719 | radeon_i2c_do_lock(radeon_connector, 1); | 719 | radeon_i2c_do_lock(radeon_connector->ddc_bus, 1); |
| 720 | radeon_connector->edid = drm_get_edid(&radeon_connector->base, &radeon_connector->ddc_bus->adapter); | 720 | radeon_connector->edid = drm_get_edid(&radeon_connector->base, &radeon_connector->ddc_bus->adapter); |
| 721 | radeon_i2c_do_lock(radeon_connector, 0); | 721 | radeon_i2c_do_lock(radeon_connector->ddc_bus, 0); |
| 722 | 722 | ||
| 723 | if (!radeon_connector->edid) { | 723 | if (!radeon_connector->edid) { |
| 724 | DRM_ERROR("%s: probed a monitor but no|invalid EDID\n", | 724 | DRM_ERROR("%s: probed a monitor but no|invalid EDID\n", |
| @@ -735,6 +735,39 @@ static enum drm_connector_status radeon_dvi_detect(struct drm_connector *connect | |||
| 735 | ret = connector_status_disconnected; | 735 | ret = connector_status_disconnected; |
| 736 | } else | 736 | } else |
| 737 | ret = connector_status_connected; | 737 | ret = connector_status_connected; |
| 738 | |||
| 739 | /* multiple connectors on the same encoder with the same ddc line | ||
| 740 | * This tends to be HDMI and DVI on the same encoder with the | ||
| 741 | * same ddc line. If the edid says HDMI, consider the HDMI port | ||
| 742 | * connected and the DVI port disconnected. If the edid doesn't | ||
| 743 | * say HDMI, vice versa. | ||
| 744 | */ | ||
| 745 | if (radeon_connector->shared_ddc && connector_status_connected) { | ||
| 746 | struct drm_device *dev = connector->dev; | ||
| 747 | struct drm_connector *list_connector; | ||
| 748 | struct radeon_connector *list_radeon_connector; | ||
| 749 | list_for_each_entry(list_connector, &dev->mode_config.connector_list, head) { | ||
| 750 | if (connector == list_connector) | ||
| 751 | continue; | ||
| 752 | list_radeon_connector = to_radeon_connector(list_connector); | ||
| 753 | if (radeon_connector->devices == list_radeon_connector->devices) { | ||
| 754 | if (drm_detect_hdmi_monitor(radeon_connector->edid)) { | ||
| 755 | if (connector->connector_type == DRM_MODE_CONNECTOR_DVID) { | ||
| 756 | kfree(radeon_connector->edid); | ||
| 757 | radeon_connector->edid = NULL; | ||
| 758 | ret = connector_status_disconnected; | ||
| 759 | } | ||
| 760 | } else { | ||
| 761 | if ((connector->connector_type == DRM_MODE_CONNECTOR_HDMIA) || | ||
| 762 | (connector->connector_type == DRM_MODE_CONNECTOR_HDMIB)) { | ||
| 763 | kfree(radeon_connector->edid); | ||
| 764 | radeon_connector->edid = NULL; | ||
| 765 | ret = connector_status_disconnected; | ||
| 766 | } | ||
| 767 | } | ||
| 768 | } | ||
| 769 | } | ||
| 770 | } | ||
| 738 | } | 771 | } |
| 739 | } | 772 | } |
| 740 | 773 | ||
| @@ -1020,6 +1053,9 @@ radeon_add_atom_connector(struct drm_device *dev, | |||
| 1020 | drm_connector_attach_property(&radeon_connector->base, | 1053 | drm_connector_attach_property(&radeon_connector->base, |
| 1021 | rdev->mode_info.load_detect_property, | 1054 | rdev->mode_info.load_detect_property, |
| 1022 | 1); | 1055 | 1); |
| 1056 | drm_connector_attach_property(&radeon_connector->base, | ||
| 1057 | rdev->mode_info.tv_std_property, | ||
| 1058 | 1); | ||
| 1023 | } | 1059 | } |
| 1024 | break; | 1060 | break; |
| 1025 | case DRM_MODE_CONNECTOR_LVDS: | 1061 | case DRM_MODE_CONNECTOR_LVDS: |
| @@ -1160,6 +1196,9 @@ radeon_add_legacy_connector(struct drm_device *dev, | |||
| 1160 | drm_connector_attach_property(&radeon_connector->base, | 1196 | drm_connector_attach_property(&radeon_connector->base, |
| 1161 | rdev->mode_info.load_detect_property, | 1197 | rdev->mode_info.load_detect_property, |
| 1162 | 1); | 1198 | 1); |
| 1199 | drm_connector_attach_property(&radeon_connector->base, | ||
| 1200 | rdev->mode_info.tv_std_property, | ||
| 1201 | 1); | ||
| 1163 | } | 1202 | } |
| 1164 | break; | 1203 | break; |
| 1165 | case DRM_MODE_CONNECTOR_LVDS: | 1204 | case DRM_MODE_CONNECTOR_LVDS: |
diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c index 5ab2cf96a264..65590a0f1d93 100644 --- a/drivers/gpu/drm/radeon/radeon_cs.c +++ b/drivers/gpu/drm/radeon/radeon_cs.c | |||
| @@ -76,17 +76,17 @@ int radeon_cs_parser_relocs(struct radeon_cs_parser *p) | |||
| 76 | } | 76 | } |
| 77 | p->relocs_ptr[i] = &p->relocs[i]; | 77 | p->relocs_ptr[i] = &p->relocs[i]; |
| 78 | p->relocs[i].robj = p->relocs[i].gobj->driver_private; | 78 | p->relocs[i].robj = p->relocs[i].gobj->driver_private; |
| 79 | p->relocs[i].lobj.robj = p->relocs[i].robj; | 79 | p->relocs[i].lobj.bo = p->relocs[i].robj; |
| 80 | p->relocs[i].lobj.rdomain = r->read_domains; | 80 | p->relocs[i].lobj.rdomain = r->read_domains; |
| 81 | p->relocs[i].lobj.wdomain = r->write_domain; | 81 | p->relocs[i].lobj.wdomain = r->write_domain; |
| 82 | p->relocs[i].handle = r->handle; | 82 | p->relocs[i].handle = r->handle; |
| 83 | p->relocs[i].flags = r->flags; | 83 | p->relocs[i].flags = r->flags; |
| 84 | INIT_LIST_HEAD(&p->relocs[i].lobj.list); | 84 | INIT_LIST_HEAD(&p->relocs[i].lobj.list); |
| 85 | radeon_object_list_add_object(&p->relocs[i].lobj, | 85 | radeon_bo_list_add_object(&p->relocs[i].lobj, |
| 86 | &p->validated); | 86 | &p->validated); |
| 87 | } | 87 | } |
| 88 | } | 88 | } |
| 89 | return radeon_object_list_validate(&p->validated, p->ib->fence); | 89 | return radeon_bo_list_validate(&p->validated, p->ib->fence); |
| 90 | } | 90 | } |
| 91 | 91 | ||
| 92 | int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data) | 92 | int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data) |
| @@ -190,9 +190,10 @@ static void radeon_cs_parser_fini(struct radeon_cs_parser *parser, int error) | |||
| 190 | unsigned i; | 190 | unsigned i; |
| 191 | 191 | ||
| 192 | if (error) { | 192 | if (error) { |
| 193 | radeon_object_list_unvalidate(&parser->validated); | 193 | radeon_bo_list_unvalidate(&parser->validated, |
| 194 | parser->ib->fence); | ||
| 194 | } else { | 195 | } else { |
| 195 | radeon_object_list_clean(&parser->validated); | 196 | radeon_bo_list_unreserve(&parser->validated); |
| 196 | } | 197 | } |
| 197 | for (i = 0; i < parser->nrelocs; i++) { | 198 | for (i = 0; i < parser->nrelocs; i++) { |
| 198 | if (parser->relocs[i].gobj) { | 199 | if (parser->relocs[i].gobj) { |
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c index 41bb76fbe734..a014ba4cc97c 100644 --- a/drivers/gpu/drm/radeon/radeon_device.c +++ b/drivers/gpu/drm/radeon/radeon_device.c | |||
| @@ -208,6 +208,24 @@ bool radeon_card_posted(struct radeon_device *rdev) | |||
| 208 | 208 | ||
| 209 | } | 209 | } |
| 210 | 210 | ||
| 211 | bool radeon_boot_test_post_card(struct radeon_device *rdev) | ||
| 212 | { | ||
| 213 | if (radeon_card_posted(rdev)) | ||
| 214 | return true; | ||
| 215 | |||
| 216 | if (rdev->bios) { | ||
| 217 | DRM_INFO("GPU not posted. posting now...\n"); | ||
| 218 | if (rdev->is_atom_bios) | ||
| 219 | atom_asic_init(rdev->mode_info.atom_context); | ||
| 220 | else | ||
| 221 | radeon_combios_asic_init(rdev->ddev); | ||
| 222 | return true; | ||
| 223 | } else { | ||
| 224 | dev_err(rdev->dev, "Card not posted and no BIOS - ignoring\n"); | ||
| 225 | return false; | ||
| 226 | } | ||
| 227 | } | ||
| 228 | |||
| 211 | int radeon_dummy_page_init(struct radeon_device *rdev) | 229 | int radeon_dummy_page_init(struct radeon_device *rdev) |
| 212 | { | 230 | { |
| 213 | rdev->dummy_page.page = alloc_page(GFP_DMA32 | GFP_KERNEL | __GFP_ZERO); | 231 | rdev->dummy_page.page = alloc_page(GFP_DMA32 | GFP_KERNEL | __GFP_ZERO); |
| @@ -544,6 +562,9 @@ int radeon_device_init(struct radeon_device *rdev, | |||
| 544 | mutex_init(&rdev->cs_mutex); | 562 | mutex_init(&rdev->cs_mutex); |
| 545 | mutex_init(&rdev->ib_pool.mutex); | 563 | mutex_init(&rdev->ib_pool.mutex); |
| 546 | mutex_init(&rdev->cp.mutex); | 564 | mutex_init(&rdev->cp.mutex); |
| 565 | if (rdev->family >= CHIP_R600) | ||
| 566 | spin_lock_init(&rdev->ih.lock); | ||
| 567 | mutex_init(&rdev->gem.mutex); | ||
| 547 | rwlock_init(&rdev->fence_drv.lock); | 568 | rwlock_init(&rdev->fence_drv.lock); |
| 548 | INIT_LIST_HEAD(&rdev->gem.objects); | 569 | INIT_LIST_HEAD(&rdev->gem.objects); |
| 549 | 570 | ||
| @@ -553,7 +574,7 @@ int radeon_device_init(struct radeon_device *rdev, | |||
| 553 | return r; | 574 | return r; |
| 554 | } | 575 | } |
| 555 | 576 | ||
| 556 | if (radeon_agpmode == -1) { | 577 | if (rdev->flags & RADEON_IS_AGP && radeon_agpmode == -1) { |
| 557 | radeon_agp_disable(rdev); | 578 | radeon_agp_disable(rdev); |
| 558 | } | 579 | } |
| 559 | 580 | ||
| @@ -633,6 +654,7 @@ int radeon_suspend_kms(struct drm_device *dev, pm_message_t state) | |||
| 633 | { | 654 | { |
| 634 | struct radeon_device *rdev = dev->dev_private; | 655 | struct radeon_device *rdev = dev->dev_private; |
| 635 | struct drm_crtc *crtc; | 656 | struct drm_crtc *crtc; |
| 657 | int r; | ||
| 636 | 658 | ||
| 637 | if (dev == NULL || rdev == NULL) { | 659 | if (dev == NULL || rdev == NULL) { |
| 638 | return -ENODEV; | 660 | return -ENODEV; |
| @@ -643,18 +665,22 @@ int radeon_suspend_kms(struct drm_device *dev, pm_message_t state) | |||
| 643 | /* unpin the front buffers */ | 665 | /* unpin the front buffers */ |
| 644 | list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { | 666 | list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { |
| 645 | struct radeon_framebuffer *rfb = to_radeon_framebuffer(crtc->fb); | 667 | struct radeon_framebuffer *rfb = to_radeon_framebuffer(crtc->fb); |
| 646 | struct radeon_object *robj; | 668 | struct radeon_bo *robj; |
| 647 | 669 | ||
| 648 | if (rfb == NULL || rfb->obj == NULL) { | 670 | if (rfb == NULL || rfb->obj == NULL) { |
| 649 | continue; | 671 | continue; |
| 650 | } | 672 | } |
| 651 | robj = rfb->obj->driver_private; | 673 | robj = rfb->obj->driver_private; |
| 652 | if (robj != rdev->fbdev_robj) { | 674 | if (robj != rdev->fbdev_rbo) { |
| 653 | radeon_object_unpin(robj); | 675 | r = radeon_bo_reserve(robj, false); |
| 676 | if (unlikely(r == 0)) { | ||
| 677 | radeon_bo_unpin(robj); | ||
| 678 | radeon_bo_unreserve(robj); | ||
| 679 | } | ||
| 654 | } | 680 | } |
| 655 | } | 681 | } |
| 656 | /* evict vram memory */ | 682 | /* evict vram memory */ |
| 657 | radeon_object_evict_vram(rdev); | 683 | radeon_bo_evict_vram(rdev); |
| 658 | /* wait for gpu to finish processing current batch */ | 684 | /* wait for gpu to finish processing current batch */ |
| 659 | radeon_fence_wait_last(rdev); | 685 | radeon_fence_wait_last(rdev); |
| 660 | 686 | ||
| @@ -662,7 +688,7 @@ int radeon_suspend_kms(struct drm_device *dev, pm_message_t state) | |||
| 662 | 688 | ||
| 663 | radeon_suspend(rdev); | 689 | radeon_suspend(rdev); |
| 664 | /* evict remaining vram memory */ | 690 | /* evict remaining vram memory */ |
| 665 | radeon_object_evict_vram(rdev); | 691 | radeon_bo_evict_vram(rdev); |
| 666 | 692 | ||
| 667 | pci_save_state(dev->pdev); | 693 | pci_save_state(dev->pdev); |
| 668 | if (state.event == PM_EVENT_SUSPEND) { | 694 | if (state.event == PM_EVENT_SUSPEND) { |
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c index c85df4afcb7a..62b02372cb09 100644 --- a/drivers/gpu/drm/radeon/radeon_display.c +++ b/drivers/gpu/drm/radeon/radeon_display.c | |||
| @@ -270,10 +270,10 @@ static void radeon_print_display_setup(struct drm_device *dev) | |||
| 270 | radeon_connector->ddc_bus->rec.mask_data_reg, | 270 | radeon_connector->ddc_bus->rec.mask_data_reg, |
| 271 | radeon_connector->ddc_bus->rec.a_clk_reg, | 271 | radeon_connector->ddc_bus->rec.a_clk_reg, |
| 272 | radeon_connector->ddc_bus->rec.a_data_reg, | 272 | radeon_connector->ddc_bus->rec.a_data_reg, |
| 273 | radeon_connector->ddc_bus->rec.put_clk_reg, | 273 | radeon_connector->ddc_bus->rec.en_clk_reg, |
| 274 | radeon_connector->ddc_bus->rec.put_data_reg, | 274 | radeon_connector->ddc_bus->rec.en_data_reg, |
| 275 | radeon_connector->ddc_bus->rec.get_clk_reg, | 275 | radeon_connector->ddc_bus->rec.y_clk_reg, |
| 276 | radeon_connector->ddc_bus->rec.get_data_reg); | 276 | radeon_connector->ddc_bus->rec.y_data_reg); |
| 277 | DRM_INFO(" Encoders:\n"); | 277 | DRM_INFO(" Encoders:\n"); |
| 278 | list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { | 278 | list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { |
| 279 | radeon_encoder = to_radeon_encoder(encoder); | 279 | radeon_encoder = to_radeon_encoder(encoder); |
| @@ -324,6 +324,7 @@ static bool radeon_setup_enc_conn(struct drm_device *dev) | |||
| 324 | ret = radeon_get_legacy_connector_info_from_table(dev); | 324 | ret = radeon_get_legacy_connector_info_from_table(dev); |
| 325 | } | 325 | } |
| 326 | if (ret) { | 326 | if (ret) { |
| 327 | radeon_setup_encoder_clones(dev); | ||
| 327 | radeon_print_display_setup(dev); | 328 | radeon_print_display_setup(dev); |
| 328 | list_for_each_entry(drm_connector, &dev->mode_config.connector_list, head) | 329 | list_for_each_entry(drm_connector, &dev->mode_config.connector_list, head) |
| 329 | radeon_ddc_dump(drm_connector); | 330 | radeon_ddc_dump(drm_connector); |
| @@ -339,9 +340,9 @@ int radeon_ddc_get_modes(struct radeon_connector *radeon_connector) | |||
| 339 | if (!radeon_connector->ddc_bus) | 340 | if (!radeon_connector->ddc_bus) |
| 340 | return -1; | 341 | return -1; |
| 341 | if (!radeon_connector->edid) { | 342 | if (!radeon_connector->edid) { |
| 342 | radeon_i2c_do_lock(radeon_connector, 1); | 343 | radeon_i2c_do_lock(radeon_connector->ddc_bus, 1); |
| 343 | radeon_connector->edid = drm_get_edid(&radeon_connector->base, &radeon_connector->ddc_bus->adapter); | 344 | radeon_connector->edid = drm_get_edid(&radeon_connector->base, &radeon_connector->ddc_bus->adapter); |
| 344 | radeon_i2c_do_lock(radeon_connector, 0); | 345 | radeon_i2c_do_lock(radeon_connector->ddc_bus, 0); |
| 345 | } | 346 | } |
| 346 | 347 | ||
| 347 | if (radeon_connector->edid) { | 348 | if (radeon_connector->edid) { |
| @@ -361,9 +362,9 @@ static int radeon_ddc_dump(struct drm_connector *connector) | |||
| 361 | 362 | ||
| 362 | if (!radeon_connector->ddc_bus) | 363 | if (!radeon_connector->ddc_bus) |
| 363 | return -1; | 364 | return -1; |
| 364 | radeon_i2c_do_lock(radeon_connector, 1); | 365 | radeon_i2c_do_lock(radeon_connector->ddc_bus, 1); |
| 365 | edid = drm_get_edid(connector, &radeon_connector->ddc_bus->adapter); | 366 | edid = drm_get_edid(connector, &radeon_connector->ddc_bus->adapter); |
| 366 | radeon_i2c_do_lock(radeon_connector, 0); | 367 | radeon_i2c_do_lock(radeon_connector->ddc_bus, 0); |
| 367 | if (edid) { | 368 | if (edid) { |
| 368 | kfree(edid); | 369 | kfree(edid); |
| 369 | } | 370 | } |
| @@ -750,9 +751,17 @@ bool radeon_crtc_scaling_mode_fixup(struct drm_crtc *crtc, | |||
| 750 | if (encoder->crtc != crtc) | 751 | if (encoder->crtc != crtc) |
| 751 | continue; | 752 | continue; |
| 752 | if (first) { | 753 | if (first) { |
| 753 | radeon_crtc->rmx_type = radeon_encoder->rmx_type; | 754 | /* set scaling */ |
| 755 | if (radeon_encoder->rmx_type == RMX_OFF) | ||
| 756 | radeon_crtc->rmx_type = RMX_OFF; | ||
| 757 | else if (mode->hdisplay < radeon_encoder->native_mode.hdisplay || | ||
| 758 | mode->vdisplay < radeon_encoder->native_mode.vdisplay) | ||
| 759 | radeon_crtc->rmx_type = radeon_encoder->rmx_type; | ||
| 760 | else | ||
| 761 | radeon_crtc->rmx_type = RMX_OFF; | ||
| 762 | /* copy native mode */ | ||
| 754 | memcpy(&radeon_crtc->native_mode, | 763 | memcpy(&radeon_crtc->native_mode, |
| 755 | &radeon_encoder->native_mode, | 764 | &radeon_encoder->native_mode, |
| 756 | sizeof(struct drm_display_mode)); | 765 | sizeof(struct drm_display_mode)); |
| 757 | first = false; | 766 | first = false; |
| 758 | } else { | 767 | } else { |
diff --git a/drivers/gpu/drm/radeon/radeon_drv.h b/drivers/gpu/drm/radeon/radeon_drv.h index 350962e0f346..e13785282a82 100644 --- a/drivers/gpu/drm/radeon/radeon_drv.h +++ b/drivers/gpu/drm/radeon/radeon_drv.h | |||
| @@ -1104,7 +1104,6 @@ extern u32 radeon_get_scratch(drm_radeon_private_t *dev_priv, int index); | |||
| 1104 | # define R600_IT_WAIT_REG_MEM 0x00003C00 | 1104 | # define R600_IT_WAIT_REG_MEM 0x00003C00 |
| 1105 | # define R600_IT_MEM_WRITE 0x00003D00 | 1105 | # define R600_IT_MEM_WRITE 0x00003D00 |
| 1106 | # define R600_IT_INDIRECT_BUFFER 0x00003200 | 1106 | # define R600_IT_INDIRECT_BUFFER 0x00003200 |
| 1107 | # define R600_IT_CP_INTERRUPT 0x00004000 | ||
| 1108 | # define R600_IT_SURFACE_SYNC 0x00004300 | 1107 | # define R600_IT_SURFACE_SYNC 0x00004300 |
| 1109 | # define R600_CB0_DEST_BASE_ENA (1 << 6) | 1108 | # define R600_CB0_DEST_BASE_ENA (1 << 6) |
| 1110 | # define R600_TC_ACTION_ENA (1 << 23) | 1109 | # define R600_TC_ACTION_ENA (1 << 23) |
diff --git a/drivers/gpu/drm/radeon/radeon_encoders.c b/drivers/gpu/drm/radeon/radeon_encoders.c index d42bc512d75a..291f6dd3683c 100644 --- a/drivers/gpu/drm/radeon/radeon_encoders.c +++ b/drivers/gpu/drm/radeon/radeon_encoders.c | |||
| @@ -35,6 +35,51 @@ extern int atom_debug; | |||
| 35 | bool radeon_atom_get_tv_timings(struct radeon_device *rdev, int index, | 35 | bool radeon_atom_get_tv_timings(struct radeon_device *rdev, int index, |
| 36 | struct drm_display_mode *mode); | 36 | struct drm_display_mode *mode); |
| 37 | 37 | ||
| 38 | static uint32_t radeon_encoder_clones(struct drm_encoder *encoder) | ||
| 39 | { | ||
| 40 | struct drm_device *dev = encoder->dev; | ||
| 41 | struct radeon_device *rdev = dev->dev_private; | ||
| 42 | struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); | ||
| 43 | struct drm_encoder *clone_encoder; | ||
| 44 | uint32_t index_mask = 0; | ||
| 45 | int count; | ||
| 46 | |||
| 47 | /* DIG routing gets problematic */ | ||
| 48 | if (rdev->family >= CHIP_R600) | ||
| 49 | return index_mask; | ||
| 50 | /* LVDS/TV are too wacky */ | ||
| 51 | if (radeon_encoder->devices & ATOM_DEVICE_LCD_SUPPORT) | ||
| 52 | return index_mask; | ||
| 53 | /* DVO requires 2x ppll clocks depending on tmds chip */ | ||
| 54 | if (radeon_encoder->devices & ATOM_DEVICE_DFP2_SUPPORT) | ||
| 55 | return index_mask; | ||
| 56 | |||
| 57 | count = -1; | ||
| 58 | list_for_each_entry(clone_encoder, &dev->mode_config.encoder_list, head) { | ||
| 59 | struct radeon_encoder *radeon_clone = to_radeon_encoder(clone_encoder); | ||
| 60 | count++; | ||
| 61 | |||
| 62 | if (clone_encoder == encoder) | ||
| 63 | continue; | ||
| 64 | if (radeon_clone->devices & (ATOM_DEVICE_LCD_SUPPORT)) | ||
| 65 | continue; | ||
| 66 | if (radeon_clone->devices & ATOM_DEVICE_DFP2_SUPPORT) | ||
| 67 | continue; | ||
| 68 | else | ||
| 69 | index_mask |= (1 << count); | ||
| 70 | } | ||
| 71 | return index_mask; | ||
| 72 | } | ||
| 73 | |||
| 74 | void radeon_setup_encoder_clones(struct drm_device *dev) | ||
| 75 | { | ||
| 76 | struct drm_encoder *encoder; | ||
| 77 | |||
| 78 | list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { | ||
| 79 | encoder->possible_clones = radeon_encoder_clones(encoder); | ||
| 80 | } | ||
| 81 | } | ||
| 82 | |||
| 38 | uint32_t | 83 | uint32_t |
| 39 | radeon_get_encoder_id(struct drm_device *dev, uint32_t supported_device, uint8_t dac) | 84 | radeon_get_encoder_id(struct drm_device *dev, uint32_t supported_device, uint8_t dac) |
| 40 | { | 85 | { |
| @@ -163,29 +208,6 @@ radeon_get_connector_for_encoder(struct drm_encoder *encoder) | |||
| 163 | return NULL; | 208 | return NULL; |
| 164 | } | 209 | } |
| 165 | 210 | ||
| 166 | /* used for both atom and legacy */ | ||
| 167 | void radeon_rmx_mode_fixup(struct drm_encoder *encoder, | ||
| 168 | struct drm_display_mode *mode, | ||
| 169 | struct drm_display_mode *adjusted_mode) | ||
| 170 | { | ||
| 171 | struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); | ||
| 172 | struct drm_device *dev = encoder->dev; | ||
| 173 | struct radeon_device *rdev = dev->dev_private; | ||
| 174 | struct drm_display_mode *native_mode = &radeon_encoder->native_mode; | ||
| 175 | |||
| 176 | if (mode->hdisplay < native_mode->hdisplay || | ||
| 177 | mode->vdisplay < native_mode->vdisplay) { | ||
| 178 | int mode_id = adjusted_mode->base.id; | ||
| 179 | *adjusted_mode = *native_mode; | ||
| 180 | if (!ASIC_IS_AVIVO(rdev)) { | ||
| 181 | adjusted_mode->hdisplay = mode->hdisplay; | ||
| 182 | adjusted_mode->vdisplay = mode->vdisplay; | ||
| 183 | } | ||
| 184 | adjusted_mode->base.id = mode_id; | ||
| 185 | } | ||
| 186 | } | ||
| 187 | |||
| 188 | |||
| 189 | static bool radeon_atom_mode_fixup(struct drm_encoder *encoder, | 211 | static bool radeon_atom_mode_fixup(struct drm_encoder *encoder, |
| 190 | struct drm_display_mode *mode, | 212 | struct drm_display_mode *mode, |
| 191 | struct drm_display_mode *adjusted_mode) | 213 | struct drm_display_mode *adjusted_mode) |
| @@ -198,14 +220,24 @@ static bool radeon_atom_mode_fixup(struct drm_encoder *encoder, | |||
| 198 | radeon_encoder_set_active_device(encoder); | 220 | radeon_encoder_set_active_device(encoder); |
| 199 | drm_mode_set_crtcinfo(adjusted_mode, 0); | 221 | drm_mode_set_crtcinfo(adjusted_mode, 0); |
| 200 | 222 | ||
| 201 | if (radeon_encoder->rmx_type != RMX_OFF) | ||
| 202 | radeon_rmx_mode_fixup(encoder, mode, adjusted_mode); | ||
| 203 | |||
| 204 | /* hw bug */ | 223 | /* hw bug */ |
| 205 | if ((mode->flags & DRM_MODE_FLAG_INTERLACE) | 224 | if ((mode->flags & DRM_MODE_FLAG_INTERLACE) |
| 206 | && (mode->crtc_vsync_start < (mode->crtc_vdisplay + 2))) | 225 | && (mode->crtc_vsync_start < (mode->crtc_vdisplay + 2))) |
| 207 | adjusted_mode->crtc_vsync_start = adjusted_mode->crtc_vdisplay + 2; | 226 | adjusted_mode->crtc_vsync_start = adjusted_mode->crtc_vdisplay + 2; |
| 208 | 227 | ||
| 228 | /* get the native mode for LVDS */ | ||
| 229 | if (radeon_encoder->active_device & (ATOM_DEVICE_LCD_SUPPORT)) { | ||
| 230 | struct drm_display_mode *native_mode = &radeon_encoder->native_mode; | ||
| 231 | int mode_id = adjusted_mode->base.id; | ||
| 232 | *adjusted_mode = *native_mode; | ||
| 233 | if (!ASIC_IS_AVIVO(rdev)) { | ||
| 234 | adjusted_mode->hdisplay = mode->hdisplay; | ||
| 235 | adjusted_mode->vdisplay = mode->vdisplay; | ||
| 236 | } | ||
| 237 | adjusted_mode->base.id = mode_id; | ||
| 238 | } | ||
| 239 | |||
| 240 | /* get the native mode for TV */ | ||
| 209 | if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT)) { | 241 | if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT)) { |
| 210 | struct radeon_encoder_atom_dac *tv_dac = radeon_encoder->enc_priv; | 242 | struct radeon_encoder_atom_dac *tv_dac = radeon_encoder->enc_priv; |
| 211 | if (tv_dac) { | 243 | if (tv_dac) { |
| @@ -392,7 +424,7 @@ union lvds_encoder_control { | |||
| 392 | LVDS_ENCODER_CONTROL_PS_ALLOCATION_V2 v2; | 424 | LVDS_ENCODER_CONTROL_PS_ALLOCATION_V2 v2; |
| 393 | }; | 425 | }; |
| 394 | 426 | ||
| 395 | static void | 427 | void |
| 396 | atombios_digital_setup(struct drm_encoder *encoder, int action) | 428 | atombios_digital_setup(struct drm_encoder *encoder, int action) |
| 397 | { | 429 | { |
| 398 | struct drm_device *dev = encoder->dev; | 430 | struct drm_device *dev = encoder->dev; |
| @@ -918,12 +950,12 @@ radeon_atom_encoder_dpms(struct drm_encoder *encoder, int mode) | |||
| 918 | if (is_dig) { | 950 | if (is_dig) { |
| 919 | switch (mode) { | 951 | switch (mode) { |
| 920 | case DRM_MODE_DPMS_ON: | 952 | case DRM_MODE_DPMS_ON: |
| 921 | atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE); | 953 | atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE_OUTPUT); |
| 922 | break; | 954 | break; |
| 923 | case DRM_MODE_DPMS_STANDBY: | 955 | case DRM_MODE_DPMS_STANDBY: |
| 924 | case DRM_MODE_DPMS_SUSPEND: | 956 | case DRM_MODE_DPMS_SUSPEND: |
| 925 | case DRM_MODE_DPMS_OFF: | 957 | case DRM_MODE_DPMS_OFF: |
| 926 | atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE); | 958 | atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE_OUTPUT); |
| 927 | break; | 959 | break; |
| 928 | } | 960 | } |
| 929 | } else { | 961 | } else { |
| @@ -1354,7 +1386,6 @@ radeon_add_atom_encoder(struct drm_device *dev, uint32_t encoder_id, uint32_t su | |||
| 1354 | encoder->possible_crtcs = 0x1; | 1386 | encoder->possible_crtcs = 0x1; |
| 1355 | else | 1387 | else |
| 1356 | encoder->possible_crtcs = 0x3; | 1388 | encoder->possible_crtcs = 0x3; |
| 1357 | encoder->possible_clones = 0; | ||
| 1358 | 1389 | ||
| 1359 | radeon_encoder->enc_priv = NULL; | 1390 | radeon_encoder->enc_priv = NULL; |
| 1360 | 1391 | ||
diff --git a/drivers/gpu/drm/radeon/radeon_fb.c b/drivers/gpu/drm/radeon/radeon_fb.c index b38c4c8e2c61..66055b3d8668 100644 --- a/drivers/gpu/drm/radeon/radeon_fb.c +++ b/drivers/gpu/drm/radeon/radeon_fb.c | |||
| @@ -140,7 +140,7 @@ int radeonfb_create(struct drm_device *dev, | |||
| 140 | struct radeon_framebuffer *rfb; | 140 | struct radeon_framebuffer *rfb; |
| 141 | struct drm_mode_fb_cmd mode_cmd; | 141 | struct drm_mode_fb_cmd mode_cmd; |
| 142 | struct drm_gem_object *gobj = NULL; | 142 | struct drm_gem_object *gobj = NULL; |
| 143 | struct radeon_object *robj = NULL; | 143 | struct radeon_bo *rbo = NULL; |
| 144 | struct device *device = &rdev->pdev->dev; | 144 | struct device *device = &rdev->pdev->dev; |
| 145 | int size, aligned_size, ret; | 145 | int size, aligned_size, ret; |
| 146 | u64 fb_gpuaddr; | 146 | u64 fb_gpuaddr; |
| @@ -168,14 +168,14 @@ int radeonfb_create(struct drm_device *dev, | |||
| 168 | ret = radeon_gem_object_create(rdev, aligned_size, 0, | 168 | ret = radeon_gem_object_create(rdev, aligned_size, 0, |
| 169 | RADEON_GEM_DOMAIN_VRAM, | 169 | RADEON_GEM_DOMAIN_VRAM, |
| 170 | false, ttm_bo_type_kernel, | 170 | false, ttm_bo_type_kernel, |
| 171 | false, &gobj); | 171 | &gobj); |
| 172 | if (ret) { | 172 | if (ret) { |
| 173 | printk(KERN_ERR "failed to allocate framebuffer (%d %d)\n", | 173 | printk(KERN_ERR "failed to allocate framebuffer (%d %d)\n", |
| 174 | surface_width, surface_height); | 174 | surface_width, surface_height); |
| 175 | ret = -ENOMEM; | 175 | ret = -ENOMEM; |
| 176 | goto out; | 176 | goto out; |
| 177 | } | 177 | } |
| 178 | robj = gobj->driver_private; | 178 | rbo = gobj->driver_private; |
| 179 | 179 | ||
| 180 | if (fb_tiled) | 180 | if (fb_tiled) |
| 181 | tiling_flags = RADEON_TILING_MACRO; | 181 | tiling_flags = RADEON_TILING_MACRO; |
| @@ -192,8 +192,13 @@ int radeonfb_create(struct drm_device *dev, | |||
| 192 | } | 192 | } |
| 193 | #endif | 193 | #endif |
| 194 | 194 | ||
| 195 | if (tiling_flags) | 195 | if (tiling_flags) { |
| 196 | radeon_object_set_tiling_flags(robj, tiling_flags | RADEON_TILING_SURFACE, mode_cmd.pitch); | 196 | ret = radeon_bo_set_tiling_flags(rbo, |
| 197 | tiling_flags | RADEON_TILING_SURFACE, | ||
| 198 | mode_cmd.pitch); | ||
| 199 | if (ret) | ||
| 200 | dev_err(rdev->dev, "FB failed to set tiling flags\n"); | ||
| 201 | } | ||
| 197 | mutex_lock(&rdev->ddev->struct_mutex); | 202 | mutex_lock(&rdev->ddev->struct_mutex); |
| 198 | fb = radeon_framebuffer_create(rdev->ddev, &mode_cmd, gobj); | 203 | fb = radeon_framebuffer_create(rdev->ddev, &mode_cmd, gobj); |
| 199 | if (fb == NULL) { | 204 | if (fb == NULL) { |
| @@ -201,10 +206,19 @@ int radeonfb_create(struct drm_device *dev, | |||
| 201 | ret = -ENOMEM; | 206 | ret = -ENOMEM; |
| 202 | goto out_unref; | 207 | goto out_unref; |
| 203 | } | 208 | } |
| 204 | ret = radeon_object_pin(robj, RADEON_GEM_DOMAIN_VRAM, &fb_gpuaddr); | 209 | ret = radeon_bo_reserve(rbo, false); |
| 210 | if (unlikely(ret != 0)) | ||
| 211 | goto out_unref; | ||
| 212 | ret = radeon_bo_pin(rbo, RADEON_GEM_DOMAIN_VRAM, &fb_gpuaddr); | ||
| 213 | if (ret) { | ||
| 214 | radeon_bo_unreserve(rbo); | ||
| 215 | goto out_unref; | ||
| 216 | } | ||
| 217 | if (fb_tiled) | ||
| 218 | radeon_bo_check_tiling(rbo, 0, 0); | ||
| 219 | ret = radeon_bo_kmap(rbo, &fbptr); | ||
| 220 | radeon_bo_unreserve(rbo); | ||
| 205 | if (ret) { | 221 | if (ret) { |
| 206 | printk(KERN_ERR "failed to pin framebuffer\n"); | ||
| 207 | ret = -ENOMEM; | ||
| 208 | goto out_unref; | 222 | goto out_unref; |
| 209 | } | 223 | } |
| 210 | 224 | ||
| @@ -213,7 +227,7 @@ int radeonfb_create(struct drm_device *dev, | |||
| 213 | *fb_p = fb; | 227 | *fb_p = fb; |
| 214 | rfb = to_radeon_framebuffer(fb); | 228 | rfb = to_radeon_framebuffer(fb); |
| 215 | rdev->fbdev_rfb = rfb; | 229 | rdev->fbdev_rfb = rfb; |
| 216 | rdev->fbdev_robj = robj; | 230 | rdev->fbdev_rbo = rbo; |
| 217 | 231 | ||
| 218 | info = framebuffer_alloc(sizeof(struct radeon_fb_device), device); | 232 | info = framebuffer_alloc(sizeof(struct radeon_fb_device), device); |
| 219 | if (info == NULL) { | 233 | if (info == NULL) { |
| @@ -234,15 +248,7 @@ int radeonfb_create(struct drm_device *dev, | |||
| 234 | if (ret) | 248 | if (ret) |
| 235 | goto out_unref; | 249 | goto out_unref; |
| 236 | 250 | ||
| 237 | if (fb_tiled) | 251 | memset_io(fbptr, 0xff, aligned_size); |
| 238 | radeon_object_check_tiling(robj, 0, 0); | ||
| 239 | |||
| 240 | ret = radeon_object_kmap(robj, &fbptr); | ||
| 241 | if (ret) { | ||
| 242 | goto out_unref; | ||
| 243 | } | ||
| 244 | |||
| 245 | memset_io(fbptr, 0, aligned_size); | ||
| 246 | 252 | ||
| 247 | strcpy(info->fix.id, "radeondrmfb"); | 253 | strcpy(info->fix.id, "radeondrmfb"); |
| 248 | 254 | ||
| @@ -288,8 +294,12 @@ int radeonfb_create(struct drm_device *dev, | |||
| 288 | return 0; | 294 | return 0; |
| 289 | 295 | ||
| 290 | out_unref: | 296 | out_unref: |
| 291 | if (robj) { | 297 | if (rbo) { |
| 292 | radeon_object_kunmap(robj); | 298 | ret = radeon_bo_reserve(rbo, false); |
| 299 | if (likely(ret == 0)) { | ||
| 300 | radeon_bo_kunmap(rbo); | ||
| 301 | radeon_bo_unreserve(rbo); | ||
| 302 | } | ||
| 293 | } | 303 | } |
| 294 | if (fb && ret) { | 304 | if (fb && ret) { |
| 295 | list_del(&fb->filp_head); | 305 | list_del(&fb->filp_head); |
| @@ -321,14 +331,22 @@ int radeon_parse_options(char *options) | |||
| 321 | 331 | ||
| 322 | int radeonfb_probe(struct drm_device *dev) | 332 | int radeonfb_probe(struct drm_device *dev) |
| 323 | { | 333 | { |
| 324 | return drm_fb_helper_single_fb_probe(dev, 32, &radeonfb_create); | 334 | struct radeon_device *rdev = dev->dev_private; |
| 335 | int bpp_sel = 32; | ||
| 336 | |||
| 337 | /* select 8 bpp console on RN50 or 16MB cards */ | ||
| 338 | if (ASIC_IS_RN50(rdev) || rdev->mc.real_vram_size <= (32*1024*1024)) | ||
| 339 | bpp_sel = 8; | ||
| 340 | |||
| 341 | return drm_fb_helper_single_fb_probe(dev, bpp_sel, &radeonfb_create); | ||
| 325 | } | 342 | } |
| 326 | 343 | ||
| 327 | int radeonfb_remove(struct drm_device *dev, struct drm_framebuffer *fb) | 344 | int radeonfb_remove(struct drm_device *dev, struct drm_framebuffer *fb) |
| 328 | { | 345 | { |
| 329 | struct fb_info *info; | 346 | struct fb_info *info; |
| 330 | struct radeon_framebuffer *rfb = to_radeon_framebuffer(fb); | 347 | struct radeon_framebuffer *rfb = to_radeon_framebuffer(fb); |
| 331 | struct radeon_object *robj; | 348 | struct radeon_bo *rbo; |
| 349 | int r; | ||
| 332 | 350 | ||
| 333 | if (!fb) { | 351 | if (!fb) { |
| 334 | return -EINVAL; | 352 | return -EINVAL; |
| @@ -336,10 +354,14 @@ int radeonfb_remove(struct drm_device *dev, struct drm_framebuffer *fb) | |||
| 336 | info = fb->fbdev; | 354 | info = fb->fbdev; |
| 337 | if (info) { | 355 | if (info) { |
| 338 | struct radeon_fb_device *rfbdev = info->par; | 356 | struct radeon_fb_device *rfbdev = info->par; |
| 339 | robj = rfb->obj->driver_private; | 357 | rbo = rfb->obj->driver_private; |
| 340 | unregister_framebuffer(info); | 358 | unregister_framebuffer(info); |
| 341 | radeon_object_kunmap(robj); | 359 | r = radeon_bo_reserve(rbo, false); |
| 342 | radeon_object_unpin(robj); | 360 | if (likely(r == 0)) { |
| 361 | radeon_bo_kunmap(rbo); | ||
| 362 | radeon_bo_unpin(rbo); | ||
| 363 | radeon_bo_unreserve(rbo); | ||
| 364 | } | ||
| 343 | drm_fb_helper_free(&rfbdev->helper); | 365 | drm_fb_helper_free(&rfbdev->helper); |
| 344 | framebuffer_release(info); | 366 | framebuffer_release(info); |
| 345 | } | 367 | } |
diff --git a/drivers/gpu/drm/radeon/radeon_fence.c b/drivers/gpu/drm/radeon/radeon_fence.c index 3beb26d74719..2ac31633d72c 100644 --- a/drivers/gpu/drm/radeon/radeon_fence.c +++ b/drivers/gpu/drm/radeon/radeon_fence.c | |||
| @@ -168,37 +168,6 @@ bool radeon_fence_signaled(struct radeon_fence *fence) | |||
| 168 | return signaled; | 168 | return signaled; |
| 169 | } | 169 | } |
| 170 | 170 | ||
| 171 | int r600_fence_wait(struct radeon_fence *fence, bool intr, bool lazy) | ||
| 172 | { | ||
| 173 | struct radeon_device *rdev; | ||
| 174 | int ret = 0; | ||
| 175 | |||
| 176 | rdev = fence->rdev; | ||
| 177 | |||
| 178 | __set_current_state(intr ? TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE); | ||
| 179 | |||
| 180 | while (1) { | ||
| 181 | if (radeon_fence_signaled(fence)) | ||
| 182 | break; | ||
| 183 | |||
| 184 | if (time_after_eq(jiffies, fence->timeout)) { | ||
| 185 | ret = -EBUSY; | ||
| 186 | break; | ||
| 187 | } | ||
| 188 | |||
| 189 | if (lazy) | ||
| 190 | schedule_timeout(1); | ||
| 191 | |||
| 192 | if (intr && signal_pending(current)) { | ||
| 193 | ret = -ERESTARTSYS; | ||
| 194 | break; | ||
| 195 | } | ||
| 196 | } | ||
| 197 | __set_current_state(TASK_RUNNING); | ||
| 198 | return ret; | ||
| 199 | } | ||
| 200 | |||
| 201 | |||
| 202 | int radeon_fence_wait(struct radeon_fence *fence, bool intr) | 171 | int radeon_fence_wait(struct radeon_fence *fence, bool intr) |
| 203 | { | 172 | { |
| 204 | struct radeon_device *rdev; | 173 | struct radeon_device *rdev; |
| @@ -216,13 +185,6 @@ int radeon_fence_wait(struct radeon_fence *fence, bool intr) | |||
| 216 | return 0; | 185 | return 0; |
| 217 | } | 186 | } |
| 218 | 187 | ||
| 219 | if (rdev->family >= CHIP_R600) { | ||
| 220 | r = r600_fence_wait(fence, intr, 0); | ||
| 221 | if (r == -ERESTARTSYS) | ||
| 222 | return -EBUSY; | ||
| 223 | return r; | ||
| 224 | } | ||
| 225 | |||
| 226 | retry: | 188 | retry: |
| 227 | cur_jiffies = jiffies; | 189 | cur_jiffies = jiffies; |
| 228 | timeout = HZ / 100; | 190 | timeout = HZ / 100; |
| @@ -231,14 +193,18 @@ retry: | |||
| 231 | } | 193 | } |
| 232 | 194 | ||
| 233 | if (intr) { | 195 | if (intr) { |
| 196 | radeon_irq_kms_sw_irq_get(rdev); | ||
| 234 | r = wait_event_interruptible_timeout(rdev->fence_drv.queue, | 197 | r = wait_event_interruptible_timeout(rdev->fence_drv.queue, |
| 235 | radeon_fence_signaled(fence), timeout); | 198 | radeon_fence_signaled(fence), timeout); |
| 199 | radeon_irq_kms_sw_irq_put(rdev); | ||
| 236 | if (unlikely(r == -ERESTARTSYS)) { | 200 | if (unlikely(r == -ERESTARTSYS)) { |
| 237 | return -EBUSY; | 201 | return -EBUSY; |
| 238 | } | 202 | } |
| 239 | } else { | 203 | } else { |
| 204 | radeon_irq_kms_sw_irq_get(rdev); | ||
| 240 | r = wait_event_timeout(rdev->fence_drv.queue, | 205 | r = wait_event_timeout(rdev->fence_drv.queue, |
| 241 | radeon_fence_signaled(fence), timeout); | 206 | radeon_fence_signaled(fence), timeout); |
| 207 | radeon_irq_kms_sw_irq_put(rdev); | ||
| 242 | } | 208 | } |
| 243 | if (unlikely(!radeon_fence_signaled(fence))) { | 209 | if (unlikely(!radeon_fence_signaled(fence))) { |
| 244 | if (unlikely(r == 0)) { | 210 | if (unlikely(r == 0)) { |
diff --git a/drivers/gpu/drm/radeon/radeon_gart.c b/drivers/gpu/drm/radeon/radeon_gart.c index a68d7566178c..e73d56e83fa6 100644 --- a/drivers/gpu/drm/radeon/radeon_gart.c +++ b/drivers/gpu/drm/radeon/radeon_gart.c | |||
| @@ -78,11 +78,9 @@ int radeon_gart_table_vram_alloc(struct radeon_device *rdev) | |||
| 78 | int r; | 78 | int r; |
| 79 | 79 | ||
| 80 | if (rdev->gart.table.vram.robj == NULL) { | 80 | if (rdev->gart.table.vram.robj == NULL) { |
| 81 | r = radeon_object_create(rdev, NULL, | 81 | r = radeon_bo_create(rdev, NULL, rdev->gart.table_size, |
| 82 | rdev->gart.table_size, | 82 | true, RADEON_GEM_DOMAIN_VRAM, |
| 83 | true, | 83 | &rdev->gart.table.vram.robj); |
| 84 | RADEON_GEM_DOMAIN_VRAM, | ||
| 85 | false, &rdev->gart.table.vram.robj); | ||
| 86 | if (r) { | 84 | if (r) { |
| 87 | return r; | 85 | return r; |
| 88 | } | 86 | } |
| @@ -95,32 +93,38 @@ int radeon_gart_table_vram_pin(struct radeon_device *rdev) | |||
| 95 | uint64_t gpu_addr; | 93 | uint64_t gpu_addr; |
| 96 | int r; | 94 | int r; |
| 97 | 95 | ||
| 98 | r = radeon_object_pin(rdev->gart.table.vram.robj, | 96 | r = radeon_bo_reserve(rdev->gart.table.vram.robj, false); |
| 99 | RADEON_GEM_DOMAIN_VRAM, &gpu_addr); | 97 | if (unlikely(r != 0)) |
| 100 | if (r) { | ||
| 101 | radeon_object_unref(&rdev->gart.table.vram.robj); | ||
| 102 | return r; | 98 | return r; |
| 103 | } | 99 | r = radeon_bo_pin(rdev->gart.table.vram.robj, |
| 104 | r = radeon_object_kmap(rdev->gart.table.vram.robj, | 100 | RADEON_GEM_DOMAIN_VRAM, &gpu_addr); |
| 105 | (void **)&rdev->gart.table.vram.ptr); | ||
| 106 | if (r) { | 101 | if (r) { |
| 107 | radeon_object_unpin(rdev->gart.table.vram.robj); | 102 | radeon_bo_unreserve(rdev->gart.table.vram.robj); |
| 108 | radeon_object_unref(&rdev->gart.table.vram.robj); | ||
| 109 | DRM_ERROR("radeon: failed to map gart vram table.\n"); | ||
| 110 | return r; | 103 | return r; |
| 111 | } | 104 | } |
| 105 | r = radeon_bo_kmap(rdev->gart.table.vram.robj, | ||
| 106 | (void **)&rdev->gart.table.vram.ptr); | ||
| 107 | if (r) | ||
| 108 | radeon_bo_unpin(rdev->gart.table.vram.robj); | ||
| 109 | radeon_bo_unreserve(rdev->gart.table.vram.robj); | ||
| 112 | rdev->gart.table_addr = gpu_addr; | 110 | rdev->gart.table_addr = gpu_addr; |
| 113 | return 0; | 111 | return r; |
| 114 | } | 112 | } |
| 115 | 113 | ||
| 116 | void radeon_gart_table_vram_free(struct radeon_device *rdev) | 114 | void radeon_gart_table_vram_free(struct radeon_device *rdev) |
| 117 | { | 115 | { |
| 116 | int r; | ||
| 117 | |||
| 118 | if (rdev->gart.table.vram.robj == NULL) { | 118 | if (rdev->gart.table.vram.robj == NULL) { |
| 119 | return; | 119 | return; |
| 120 | } | 120 | } |
| 121 | radeon_object_kunmap(rdev->gart.table.vram.robj); | 121 | r = radeon_bo_reserve(rdev->gart.table.vram.robj, false); |
| 122 | radeon_object_unpin(rdev->gart.table.vram.robj); | 122 | if (likely(r == 0)) { |
| 123 | radeon_object_unref(&rdev->gart.table.vram.robj); | 123 | radeon_bo_kunmap(rdev->gart.table.vram.robj); |
| 124 | radeon_bo_unpin(rdev->gart.table.vram.robj); | ||
| 125 | radeon_bo_unreserve(rdev->gart.table.vram.robj); | ||
| 126 | } | ||
| 127 | radeon_bo_unref(&rdev->gart.table.vram.robj); | ||
| 124 | } | 128 | } |
| 125 | 129 | ||
| 126 | 130 | ||
diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c index d880edf254db..e927f998f76f 100644 --- a/drivers/gpu/drm/radeon/radeon_gem.c +++ b/drivers/gpu/drm/radeon/radeon_gem.c | |||
| @@ -38,22 +38,21 @@ int radeon_gem_object_init(struct drm_gem_object *obj) | |||
| 38 | 38 | ||
| 39 | void radeon_gem_object_free(struct drm_gem_object *gobj) | 39 | void radeon_gem_object_free(struct drm_gem_object *gobj) |
| 40 | { | 40 | { |
| 41 | struct radeon_object *robj = gobj->driver_private; | 41 | struct radeon_bo *robj = gobj->driver_private; |
| 42 | 42 | ||
| 43 | gobj->driver_private = NULL; | 43 | gobj->driver_private = NULL; |
| 44 | if (robj) { | 44 | if (robj) { |
| 45 | radeon_object_unref(&robj); | 45 | radeon_bo_unref(&robj); |
| 46 | } | 46 | } |
| 47 | } | 47 | } |
| 48 | 48 | ||
| 49 | int radeon_gem_object_create(struct radeon_device *rdev, int size, | 49 | int radeon_gem_object_create(struct radeon_device *rdev, int size, |
| 50 | int alignment, int initial_domain, | 50 | int alignment, int initial_domain, |
| 51 | bool discardable, bool kernel, | 51 | bool discardable, bool kernel, |
| 52 | bool interruptible, | 52 | struct drm_gem_object **obj) |
| 53 | struct drm_gem_object **obj) | ||
| 54 | { | 53 | { |
| 55 | struct drm_gem_object *gobj; | 54 | struct drm_gem_object *gobj; |
| 56 | struct radeon_object *robj; | 55 | struct radeon_bo *robj; |
| 57 | int r; | 56 | int r; |
| 58 | 57 | ||
| 59 | *obj = NULL; | 58 | *obj = NULL; |
| @@ -65,8 +64,7 @@ int radeon_gem_object_create(struct radeon_device *rdev, int size, | |||
| 65 | if (alignment < PAGE_SIZE) { | 64 | if (alignment < PAGE_SIZE) { |
| 66 | alignment = PAGE_SIZE; | 65 | alignment = PAGE_SIZE; |
| 67 | } | 66 | } |
| 68 | r = radeon_object_create(rdev, gobj, size, kernel, initial_domain, | 67 | r = radeon_bo_create(rdev, gobj, size, kernel, initial_domain, &robj); |
| 69 | interruptible, &robj); | ||
| 70 | if (r) { | 68 | if (r) { |
| 71 | DRM_ERROR("Failed to allocate GEM object (%d, %d, %u)\n", | 69 | DRM_ERROR("Failed to allocate GEM object (%d, %d, %u)\n", |
| 72 | size, initial_domain, alignment); | 70 | size, initial_domain, alignment); |
| @@ -83,33 +81,33 @@ int radeon_gem_object_create(struct radeon_device *rdev, int size, | |||
| 83 | int radeon_gem_object_pin(struct drm_gem_object *obj, uint32_t pin_domain, | 81 | int radeon_gem_object_pin(struct drm_gem_object *obj, uint32_t pin_domain, |
| 84 | uint64_t *gpu_addr) | 82 | uint64_t *gpu_addr) |
| 85 | { | 83 | { |
| 86 | struct radeon_object *robj = obj->driver_private; | 84 | struct radeon_bo *robj = obj->driver_private; |
| 87 | uint32_t flags; | 85 | int r; |
| 88 | 86 | ||
| 89 | switch (pin_domain) { | 87 | r = radeon_bo_reserve(robj, false); |
| 90 | case RADEON_GEM_DOMAIN_VRAM: | 88 | if (unlikely(r != 0)) |
| 91 | flags = TTM_PL_FLAG_VRAM; | 89 | return r; |
| 92 | break; | 90 | r = radeon_bo_pin(robj, pin_domain, gpu_addr); |
| 93 | case RADEON_GEM_DOMAIN_GTT: | 91 | radeon_bo_unreserve(robj); |
| 94 | flags = TTM_PL_FLAG_TT; | 92 | return r; |
| 95 | break; | ||
| 96 | default: | ||
| 97 | flags = TTM_PL_FLAG_SYSTEM; | ||
| 98 | break; | ||
| 99 | } | ||
| 100 | return radeon_object_pin(robj, flags, gpu_addr); | ||
| 101 | } | 93 | } |
| 102 | 94 | ||
| 103 | void radeon_gem_object_unpin(struct drm_gem_object *obj) | 95 | void radeon_gem_object_unpin(struct drm_gem_object *obj) |
| 104 | { | 96 | { |
| 105 | struct radeon_object *robj = obj->driver_private; | 97 | struct radeon_bo *robj = obj->driver_private; |
| 106 | radeon_object_unpin(robj); | 98 | int r; |
| 99 | |||
| 100 | r = radeon_bo_reserve(robj, false); | ||
| 101 | if (likely(r == 0)) { | ||
| 102 | radeon_bo_unpin(robj); | ||
| 103 | radeon_bo_unreserve(robj); | ||
| 104 | } | ||
| 107 | } | 105 | } |
| 108 | 106 | ||
| 109 | int radeon_gem_set_domain(struct drm_gem_object *gobj, | 107 | int radeon_gem_set_domain(struct drm_gem_object *gobj, |
| 110 | uint32_t rdomain, uint32_t wdomain) | 108 | uint32_t rdomain, uint32_t wdomain) |
| 111 | { | 109 | { |
| 112 | struct radeon_object *robj; | 110 | struct radeon_bo *robj; |
| 113 | uint32_t domain; | 111 | uint32_t domain; |
| 114 | int r; | 112 | int r; |
| 115 | 113 | ||
| @@ -127,11 +125,12 @@ int radeon_gem_set_domain(struct drm_gem_object *gobj, | |||
| 127 | } | 125 | } |
| 128 | if (domain == RADEON_GEM_DOMAIN_CPU) { | 126 | if (domain == RADEON_GEM_DOMAIN_CPU) { |
| 129 | /* Asking for cpu access wait for object idle */ | 127 | /* Asking for cpu access wait for object idle */ |
| 130 | r = radeon_object_wait(robj); | 128 | r = radeon_bo_wait(robj, NULL, false); |
| 131 | if (r) { | 129 | if (r) { |
| 132 | printk(KERN_ERR "Failed to wait for object !\n"); | 130 | printk(KERN_ERR "Failed to wait for object !\n"); |
| 133 | return r; | 131 | return r; |
| 134 | } | 132 | } |
| 133 | radeon_hdp_flush(robj->rdev); | ||
| 135 | } | 134 | } |
| 136 | return 0; | 135 | return 0; |
| 137 | } | 136 | } |
| @@ -144,7 +143,7 @@ int radeon_gem_init(struct radeon_device *rdev) | |||
| 144 | 143 | ||
| 145 | void radeon_gem_fini(struct radeon_device *rdev) | 144 | void radeon_gem_fini(struct radeon_device *rdev) |
| 146 | { | 145 | { |
| 147 | radeon_object_force_delete(rdev); | 146 | radeon_bo_force_delete(rdev); |
| 148 | } | 147 | } |
| 149 | 148 | ||
| 150 | 149 | ||
| @@ -158,9 +157,13 @@ int radeon_gem_info_ioctl(struct drm_device *dev, void *data, | |||
| 158 | struct drm_radeon_gem_info *args = data; | 157 | struct drm_radeon_gem_info *args = data; |
| 159 | 158 | ||
| 160 | args->vram_size = rdev->mc.real_vram_size; | 159 | args->vram_size = rdev->mc.real_vram_size; |
| 161 | /* FIXME: report somethings that makes sense */ | 160 | args->vram_visible = rdev->mc.real_vram_size; |
| 162 | args->vram_visible = rdev->mc.real_vram_size - (4 * 1024 * 1024); | 161 | if (rdev->stollen_vga_memory) |
| 163 | args->gart_size = rdev->mc.gtt_size; | 162 | args->vram_visible -= radeon_bo_size(rdev->stollen_vga_memory); |
| 163 | if (rdev->fbdev_rbo) | ||
| 164 | args->vram_visible -= radeon_bo_size(rdev->fbdev_rbo); | ||
| 165 | args->gart_size = rdev->mc.gtt_size - rdev->cp.ring_size - 4096 - | ||
| 166 | RADEON_IB_POOL_SIZE*64*1024; | ||
| 164 | return 0; | 167 | return 0; |
| 165 | } | 168 | } |
| 166 | 169 | ||
| @@ -192,8 +195,8 @@ int radeon_gem_create_ioctl(struct drm_device *dev, void *data, | |||
| 192 | /* create a gem object to contain this object in */ | 195 | /* create a gem object to contain this object in */ |
| 193 | args->size = roundup(args->size, PAGE_SIZE); | 196 | args->size = roundup(args->size, PAGE_SIZE); |
| 194 | r = radeon_gem_object_create(rdev, args->size, args->alignment, | 197 | r = radeon_gem_object_create(rdev, args->size, args->alignment, |
| 195 | args->initial_domain, false, | 198 | args->initial_domain, false, |
| 196 | false, true, &gobj); | 199 | false, &gobj); |
| 197 | if (r) { | 200 | if (r) { |
| 198 | return r; | 201 | return r; |
| 199 | } | 202 | } |
| @@ -218,7 +221,7 @@ int radeon_gem_set_domain_ioctl(struct drm_device *dev, void *data, | |||
| 218 | * just validate the BO into a certain domain */ | 221 | * just validate the BO into a certain domain */ |
| 219 | struct drm_radeon_gem_set_domain *args = data; | 222 | struct drm_radeon_gem_set_domain *args = data; |
| 220 | struct drm_gem_object *gobj; | 223 | struct drm_gem_object *gobj; |
| 221 | struct radeon_object *robj; | 224 | struct radeon_bo *robj; |
| 222 | int r; | 225 | int r; |
| 223 | 226 | ||
| 224 | /* for now if someone requests domain CPU - | 227 | /* for now if someone requests domain CPU - |
| @@ -244,19 +247,18 @@ int radeon_gem_mmap_ioctl(struct drm_device *dev, void *data, | |||
| 244 | { | 247 | { |
| 245 | struct drm_radeon_gem_mmap *args = data; | 248 | struct drm_radeon_gem_mmap *args = data; |
| 246 | struct drm_gem_object *gobj; | 249 | struct drm_gem_object *gobj; |
| 247 | struct radeon_object *robj; | 250 | struct radeon_bo *robj; |
| 248 | int r; | ||
| 249 | 251 | ||
| 250 | gobj = drm_gem_object_lookup(dev, filp, args->handle); | 252 | gobj = drm_gem_object_lookup(dev, filp, args->handle); |
| 251 | if (gobj == NULL) { | 253 | if (gobj == NULL) { |
| 252 | return -EINVAL; | 254 | return -EINVAL; |
| 253 | } | 255 | } |
| 254 | robj = gobj->driver_private; | 256 | robj = gobj->driver_private; |
| 255 | r = radeon_object_mmap(robj, &args->addr_ptr); | 257 | args->addr_ptr = radeon_bo_mmap_offset(robj); |
| 256 | mutex_lock(&dev->struct_mutex); | 258 | mutex_lock(&dev->struct_mutex); |
| 257 | drm_gem_object_unreference(gobj); | 259 | drm_gem_object_unreference(gobj); |
| 258 | mutex_unlock(&dev->struct_mutex); | 260 | mutex_unlock(&dev->struct_mutex); |
| 259 | return r; | 261 | return 0; |
| 260 | } | 262 | } |
| 261 | 263 | ||
| 262 | int radeon_gem_busy_ioctl(struct drm_device *dev, void *data, | 264 | int radeon_gem_busy_ioctl(struct drm_device *dev, void *data, |
| @@ -264,7 +266,7 @@ int radeon_gem_busy_ioctl(struct drm_device *dev, void *data, | |||
| 264 | { | 266 | { |
| 265 | struct drm_radeon_gem_busy *args = data; | 267 | struct drm_radeon_gem_busy *args = data; |
| 266 | struct drm_gem_object *gobj; | 268 | struct drm_gem_object *gobj; |
| 267 | struct radeon_object *robj; | 269 | struct radeon_bo *robj; |
| 268 | int r; | 270 | int r; |
| 269 | uint32_t cur_placement; | 271 | uint32_t cur_placement; |
| 270 | 272 | ||
| @@ -273,7 +275,7 @@ int radeon_gem_busy_ioctl(struct drm_device *dev, void *data, | |||
| 273 | return -EINVAL; | 275 | return -EINVAL; |
| 274 | } | 276 | } |
| 275 | robj = gobj->driver_private; | 277 | robj = gobj->driver_private; |
| 276 | r = radeon_object_busy_domain(robj, &cur_placement); | 278 | r = radeon_bo_wait(robj, &cur_placement, true); |
| 277 | switch (cur_placement) { | 279 | switch (cur_placement) { |
| 278 | case TTM_PL_VRAM: | 280 | case TTM_PL_VRAM: |
| 279 | args->domain = RADEON_GEM_DOMAIN_VRAM; | 281 | args->domain = RADEON_GEM_DOMAIN_VRAM; |
| @@ -297,7 +299,7 @@ int radeon_gem_wait_idle_ioctl(struct drm_device *dev, void *data, | |||
| 297 | { | 299 | { |
| 298 | struct drm_radeon_gem_wait_idle *args = data; | 300 | struct drm_radeon_gem_wait_idle *args = data; |
| 299 | struct drm_gem_object *gobj; | 301 | struct drm_gem_object *gobj; |
| 300 | struct radeon_object *robj; | 302 | struct radeon_bo *robj; |
| 301 | int r; | 303 | int r; |
| 302 | 304 | ||
| 303 | gobj = drm_gem_object_lookup(dev, filp, args->handle); | 305 | gobj = drm_gem_object_lookup(dev, filp, args->handle); |
| @@ -305,10 +307,11 @@ int radeon_gem_wait_idle_ioctl(struct drm_device *dev, void *data, | |||
| 305 | return -EINVAL; | 307 | return -EINVAL; |
| 306 | } | 308 | } |
| 307 | robj = gobj->driver_private; | 309 | robj = gobj->driver_private; |
| 308 | r = radeon_object_wait(robj); | 310 | r = radeon_bo_wait(robj, NULL, false); |
| 309 | mutex_lock(&dev->struct_mutex); | 311 | mutex_lock(&dev->struct_mutex); |
| 310 | drm_gem_object_unreference(gobj); | 312 | drm_gem_object_unreference(gobj); |
| 311 | mutex_unlock(&dev->struct_mutex); | 313 | mutex_unlock(&dev->struct_mutex); |
| 314 | radeon_hdp_flush(robj->rdev); | ||
| 312 | return r; | 315 | return r; |
| 313 | } | 316 | } |
| 314 | 317 | ||
| @@ -317,7 +320,7 @@ int radeon_gem_set_tiling_ioctl(struct drm_device *dev, void *data, | |||
| 317 | { | 320 | { |
| 318 | struct drm_radeon_gem_set_tiling *args = data; | 321 | struct drm_radeon_gem_set_tiling *args = data; |
| 319 | struct drm_gem_object *gobj; | 322 | struct drm_gem_object *gobj; |
| 320 | struct radeon_object *robj; | 323 | struct radeon_bo *robj; |
| 321 | int r = 0; | 324 | int r = 0; |
| 322 | 325 | ||
| 323 | DRM_DEBUG("%d \n", args->handle); | 326 | DRM_DEBUG("%d \n", args->handle); |
| @@ -325,7 +328,7 @@ int radeon_gem_set_tiling_ioctl(struct drm_device *dev, void *data, | |||
| 325 | if (gobj == NULL) | 328 | if (gobj == NULL) |
| 326 | return -EINVAL; | 329 | return -EINVAL; |
| 327 | robj = gobj->driver_private; | 330 | robj = gobj->driver_private; |
| 328 | radeon_object_set_tiling_flags(robj, args->tiling_flags, args->pitch); | 331 | r = radeon_bo_set_tiling_flags(robj, args->tiling_flags, args->pitch); |
| 329 | mutex_lock(&dev->struct_mutex); | 332 | mutex_lock(&dev->struct_mutex); |
| 330 | drm_gem_object_unreference(gobj); | 333 | drm_gem_object_unreference(gobj); |
| 331 | mutex_unlock(&dev->struct_mutex); | 334 | mutex_unlock(&dev->struct_mutex); |
| @@ -337,16 +340,19 @@ int radeon_gem_get_tiling_ioctl(struct drm_device *dev, void *data, | |||
| 337 | { | 340 | { |
| 338 | struct drm_radeon_gem_get_tiling *args = data; | 341 | struct drm_radeon_gem_get_tiling *args = data; |
| 339 | struct drm_gem_object *gobj; | 342 | struct drm_gem_object *gobj; |
| 340 | struct radeon_object *robj; | 343 | struct radeon_bo *rbo; |
| 341 | int r = 0; | 344 | int r = 0; |
| 342 | 345 | ||
| 343 | DRM_DEBUG("\n"); | 346 | DRM_DEBUG("\n"); |
| 344 | gobj = drm_gem_object_lookup(dev, filp, args->handle); | 347 | gobj = drm_gem_object_lookup(dev, filp, args->handle); |
| 345 | if (gobj == NULL) | 348 | if (gobj == NULL) |
| 346 | return -EINVAL; | 349 | return -EINVAL; |
| 347 | robj = gobj->driver_private; | 350 | rbo = gobj->driver_private; |
| 348 | radeon_object_get_tiling_flags(robj, &args->tiling_flags, | 351 | r = radeon_bo_reserve(rbo, false); |
| 349 | &args->pitch); | 352 | if (unlikely(r != 0)) |
| 353 | return r; | ||
| 354 | radeon_bo_get_tiling_flags(rbo, &args->tiling_flags, &args->pitch); | ||
| 355 | radeon_bo_unreserve(rbo); | ||
| 350 | mutex_lock(&dev->struct_mutex); | 356 | mutex_lock(&dev->struct_mutex); |
| 351 | drm_gem_object_unreference(gobj); | 357 | drm_gem_object_unreference(gobj); |
| 352 | mutex_unlock(&dev->struct_mutex); | 358 | mutex_unlock(&dev->struct_mutex); |
diff --git a/drivers/gpu/drm/radeon/radeon_i2c.c b/drivers/gpu/drm/radeon/radeon_i2c.c index dd438d32e5c0..6c645fb4dad8 100644 --- a/drivers/gpu/drm/radeon/radeon_i2c.c +++ b/drivers/gpu/drm/radeon/radeon_i2c.c | |||
| @@ -59,11 +59,11 @@ bool radeon_ddc_probe(struct radeon_connector *radeon_connector) | |||
| 59 | } | 59 | } |
| 60 | 60 | ||
| 61 | 61 | ||
| 62 | void radeon_i2c_do_lock(struct radeon_connector *radeon_connector, int lock_state) | 62 | void radeon_i2c_do_lock(struct radeon_i2c_chan *i2c, int lock_state) |
| 63 | { | 63 | { |
| 64 | struct radeon_device *rdev = radeon_connector->base.dev->dev_private; | 64 | struct radeon_device *rdev = i2c->dev->dev_private; |
| 65 | struct radeon_i2c_bus_rec *rec = &i2c->rec; | ||
| 65 | uint32_t temp; | 66 | uint32_t temp; |
| 66 | struct radeon_i2c_bus_rec *rec = &radeon_connector->ddc_bus->rec; | ||
| 67 | 67 | ||
| 68 | /* RV410 appears to have a bug where the hw i2c in reset | 68 | /* RV410 appears to have a bug where the hw i2c in reset |
| 69 | * holds the i2c port in a bad state - switch hw i2c away before | 69 | * holds the i2c port in a bad state - switch hw i2c away before |
| @@ -78,16 +78,16 @@ void radeon_i2c_do_lock(struct radeon_connector *radeon_connector, int lock_stat | |||
| 78 | R200_DVI_I2C_PIN_SEL(R200_SEL_DDC3))); | 78 | R200_DVI_I2C_PIN_SEL(R200_SEL_DDC3))); |
| 79 | } | 79 | } |
| 80 | } | 80 | } |
| 81 | if (lock_state) { | ||
| 82 | temp = RREG32(rec->a_clk_reg); | ||
| 83 | temp &= ~(rec->a_clk_mask); | ||
| 84 | WREG32(rec->a_clk_reg, temp); | ||
| 85 | |||
| 86 | temp = RREG32(rec->a_data_reg); | ||
| 87 | temp &= ~(rec->a_data_mask); | ||
| 88 | WREG32(rec->a_data_reg, temp); | ||
| 89 | } | ||
| 90 | 81 | ||
| 82 | /* clear the output pin values */ | ||
| 83 | temp = RREG32(rec->a_clk_reg) & ~rec->a_clk_mask; | ||
| 84 | WREG32(rec->a_clk_reg, temp); | ||
| 85 | |||
| 86 | temp = RREG32(rec->a_data_reg) & ~rec->a_data_mask; | ||
| 87 | WREG32(rec->a_data_reg, temp); | ||
| 88 | |||
| 89 | |||
| 90 | /* mask the gpio pins for software use */ | ||
| 91 | temp = RREG32(rec->mask_clk_reg); | 91 | temp = RREG32(rec->mask_clk_reg); |
| 92 | if (lock_state) | 92 | if (lock_state) |
| 93 | temp |= rec->mask_clk_mask; | 93 | temp |= rec->mask_clk_mask; |
| @@ -112,8 +112,9 @@ static int get_clock(void *i2c_priv) | |||
| 112 | struct radeon_i2c_bus_rec *rec = &i2c->rec; | 112 | struct radeon_i2c_bus_rec *rec = &i2c->rec; |
| 113 | uint32_t val; | 113 | uint32_t val; |
| 114 | 114 | ||
| 115 | val = RREG32(rec->get_clk_reg); | 115 | /* read the value off the pin */ |
| 116 | val &= rec->get_clk_mask; | 116 | val = RREG32(rec->y_clk_reg); |
| 117 | val &= rec->y_clk_mask; | ||
| 117 | 118 | ||
| 118 | return (val != 0); | 119 | return (val != 0); |
| 119 | } | 120 | } |
| @@ -126,8 +127,10 @@ static int get_data(void *i2c_priv) | |||
| 126 | struct radeon_i2c_bus_rec *rec = &i2c->rec; | 127 | struct radeon_i2c_bus_rec *rec = &i2c->rec; |
| 127 | uint32_t val; | 128 | uint32_t val; |
| 128 | 129 | ||
| 129 | val = RREG32(rec->get_data_reg); | 130 | /* read the value off the pin */ |
| 130 | val &= rec->get_data_mask; | 131 | val = RREG32(rec->y_data_reg); |
| 132 | val &= rec->y_data_mask; | ||
| 133 | |||
| 131 | return (val != 0); | 134 | return (val != 0); |
| 132 | } | 135 | } |
| 133 | 136 | ||
| @@ -138,9 +141,10 @@ static void set_clock(void *i2c_priv, int clock) | |||
| 138 | struct radeon_i2c_bus_rec *rec = &i2c->rec; | 141 | struct radeon_i2c_bus_rec *rec = &i2c->rec; |
| 139 | uint32_t val; | 142 | uint32_t val; |
| 140 | 143 | ||
| 141 | val = RREG32(rec->put_clk_reg) & (uint32_t)~(rec->put_clk_mask); | 144 | /* set pin direction */ |
| 142 | val |= clock ? 0 : rec->put_clk_mask; | 145 | val = RREG32(rec->en_clk_reg) & ~rec->en_clk_mask; |
| 143 | WREG32(rec->put_clk_reg, val); | 146 | val |= clock ? 0 : rec->en_clk_mask; |
| 147 | WREG32(rec->en_clk_reg, val); | ||
| 144 | } | 148 | } |
| 145 | 149 | ||
| 146 | static void set_data(void *i2c_priv, int data) | 150 | static void set_data(void *i2c_priv, int data) |
| @@ -150,14 +154,15 @@ static void set_data(void *i2c_priv, int data) | |||
| 150 | struct radeon_i2c_bus_rec *rec = &i2c->rec; | 154 | struct radeon_i2c_bus_rec *rec = &i2c->rec; |
| 151 | uint32_t val; | 155 | uint32_t val; |
| 152 | 156 | ||
| 153 | val = RREG32(rec->put_data_reg) & (uint32_t)~(rec->put_data_mask); | 157 | /* set pin direction */ |
| 154 | val |= data ? 0 : rec->put_data_mask; | 158 | val = RREG32(rec->en_data_reg) & ~rec->en_data_mask; |
| 155 | WREG32(rec->put_data_reg, val); | 159 | val |= data ? 0 : rec->en_data_mask; |
| 160 | WREG32(rec->en_data_reg, val); | ||
| 156 | } | 161 | } |
| 157 | 162 | ||
| 158 | struct radeon_i2c_chan *radeon_i2c_create(struct drm_device *dev, | 163 | struct radeon_i2c_chan *radeon_i2c_create(struct drm_device *dev, |
| 159 | struct radeon_i2c_bus_rec *rec, | 164 | struct radeon_i2c_bus_rec *rec, |
| 160 | const char *name) | 165 | const char *name) |
| 161 | { | 166 | { |
| 162 | struct radeon_i2c_chan *i2c; | 167 | struct radeon_i2c_chan *i2c; |
| 163 | int ret; | 168 | int ret; |
| @@ -207,3 +212,59 @@ struct drm_encoder *radeon_best_encoder(struct drm_connector *connector) | |||
| 207 | { | 212 | { |
| 208 | return NULL; | 213 | return NULL; |
| 209 | } | 214 | } |
| 215 | |||
| 216 | void radeon_i2c_sw_get_byte(struct radeon_i2c_chan *i2c_bus, | ||
| 217 | u8 slave_addr, | ||
| 218 | u8 addr, | ||
| 219 | u8 *val) | ||
| 220 | { | ||
| 221 | u8 out_buf[2]; | ||
| 222 | u8 in_buf[2]; | ||
| 223 | struct i2c_msg msgs[] = { | ||
| 224 | { | ||
| 225 | .addr = slave_addr, | ||
| 226 | .flags = 0, | ||
| 227 | .len = 1, | ||
| 228 | .buf = out_buf, | ||
| 229 | }, | ||
| 230 | { | ||
| 231 | .addr = slave_addr, | ||
| 232 | .flags = I2C_M_RD, | ||
| 233 | .len = 1, | ||
| 234 | .buf = in_buf, | ||
| 235 | } | ||
| 236 | }; | ||
| 237 | |||
| 238 | out_buf[0] = addr; | ||
| 239 | out_buf[1] = 0; | ||
| 240 | |||
| 241 | if (i2c_transfer(&i2c_bus->adapter, msgs, 2) == 2) { | ||
| 242 | *val = in_buf[0]; | ||
| 243 | DRM_DEBUG("val = 0x%02x\n", *val); | ||
| 244 | } else { | ||
| 245 | DRM_ERROR("i2c 0x%02x 0x%02x read failed\n", | ||
| 246 | addr, *val); | ||
| 247 | } | ||
| 248 | } | ||
| 249 | |||
| 250 | void radeon_i2c_sw_put_byte(struct radeon_i2c_chan *i2c_bus, | ||
| 251 | u8 slave_addr, | ||
| 252 | u8 addr, | ||
| 253 | u8 val) | ||
| 254 | { | ||
| 255 | uint8_t out_buf[2]; | ||
| 256 | struct i2c_msg msg = { | ||
| 257 | .addr = slave_addr, | ||
| 258 | .flags = 0, | ||
| 259 | .len = 2, | ||
| 260 | .buf = out_buf, | ||
| 261 | }; | ||
| 262 | |||
| 263 | out_buf[0] = addr; | ||
| 264 | out_buf[1] = val; | ||
| 265 | |||
| 266 | if (i2c_transfer(&i2c_bus->adapter, &msg, 1) != 1) | ||
| 267 | DRM_ERROR("i2c 0x%02x 0x%02x write failed\n", | ||
| 268 | addr, val); | ||
| 269 | } | ||
| 270 | |||
diff --git a/drivers/gpu/drm/radeon/radeon_irq_kms.c b/drivers/gpu/drm/radeon/radeon_irq_kms.c index a0fe6232dcb6..26789970c5cf 100644 --- a/drivers/gpu/drm/radeon/radeon_irq_kms.c +++ b/drivers/gpu/drm/radeon/radeon_irq_kms.c | |||
| @@ -87,17 +87,25 @@ int radeon_irq_kms_init(struct radeon_device *rdev) | |||
| 87 | 87 | ||
| 88 | if (rdev->flags & RADEON_SINGLE_CRTC) | 88 | if (rdev->flags & RADEON_SINGLE_CRTC) |
| 89 | num_crtc = 1; | 89 | num_crtc = 1; |
| 90 | 90 | spin_lock_init(&rdev->irq.sw_lock); | |
| 91 | r = drm_vblank_init(rdev->ddev, num_crtc); | 91 | r = drm_vblank_init(rdev->ddev, num_crtc); |
| 92 | if (r) { | 92 | if (r) { |
| 93 | return r; | 93 | return r; |
| 94 | } | 94 | } |
| 95 | /* enable msi */ | 95 | /* enable msi */ |
| 96 | rdev->msi_enabled = 0; | 96 | rdev->msi_enabled = 0; |
| 97 | if (rdev->family >= CHIP_RV380) { | 97 | /* MSIs don't seem to work on my rs780; |
| 98 | * not sure about rs880 or other rs780s. | ||
| 99 | * Needs more investigation. | ||
| 100 | */ | ||
| 101 | if ((rdev->family >= CHIP_RV380) && | ||
| 102 | (rdev->family != CHIP_RS780) && | ||
| 103 | (rdev->family != CHIP_RS880)) { | ||
| 98 | int ret = pci_enable_msi(rdev->pdev); | 104 | int ret = pci_enable_msi(rdev->pdev); |
| 99 | if (!ret) | 105 | if (!ret) { |
| 100 | rdev->msi_enabled = 1; | 106 | rdev->msi_enabled = 1; |
| 107 | DRM_INFO("radeon: using MSI.\n"); | ||
| 108 | } | ||
| 101 | } | 109 | } |
| 102 | drm_irq_install(rdev->ddev); | 110 | drm_irq_install(rdev->ddev); |
| 103 | rdev->irq.installed = true; | 111 | rdev->irq.installed = true; |
| @@ -114,3 +122,29 @@ void radeon_irq_kms_fini(struct radeon_device *rdev) | |||
| 114 | pci_disable_msi(rdev->pdev); | 122 | pci_disable_msi(rdev->pdev); |
| 115 | } | 123 | } |
| 116 | } | 124 | } |
| 125 | |||
| 126 | void radeon_irq_kms_sw_irq_get(struct radeon_device *rdev) | ||
| 127 | { | ||
| 128 | unsigned long irqflags; | ||
| 129 | |||
| 130 | spin_lock_irqsave(&rdev->irq.sw_lock, irqflags); | ||
| 131 | if (rdev->ddev->irq_enabled && (++rdev->irq.sw_refcount == 1)) { | ||
| 132 | rdev->irq.sw_int = true; | ||
| 133 | radeon_irq_set(rdev); | ||
| 134 | } | ||
| 135 | spin_unlock_irqrestore(&rdev->irq.sw_lock, irqflags); | ||
| 136 | } | ||
| 137 | |||
| 138 | void radeon_irq_kms_sw_irq_put(struct radeon_device *rdev) | ||
| 139 | { | ||
| 140 | unsigned long irqflags; | ||
| 141 | |||
| 142 | spin_lock_irqsave(&rdev->irq.sw_lock, irqflags); | ||
| 143 | BUG_ON(rdev->ddev->irq_enabled && rdev->irq.sw_refcount <= 0); | ||
| 144 | if (rdev->ddev->irq_enabled && (--rdev->irq.sw_refcount == 0)) { | ||
| 145 | rdev->irq.sw_int = false; | ||
| 146 | radeon_irq_set(rdev); | ||
| 147 | } | ||
| 148 | spin_unlock_irqrestore(&rdev->irq.sw_lock, irqflags); | ||
| 149 | } | ||
| 150 | |||
diff --git a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c index 8d0b7aa87fa4..b82ede98e152 100644 --- a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c +++ b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c | |||
| @@ -30,6 +30,18 @@ | |||
| 30 | #include "radeon.h" | 30 | #include "radeon.h" |
| 31 | #include "atom.h" | 31 | #include "atom.h" |
| 32 | 32 | ||
| 33 | static void radeon_overscan_setup(struct drm_crtc *crtc, | ||
| 34 | struct drm_display_mode *mode) | ||
| 35 | { | ||
| 36 | struct drm_device *dev = crtc->dev; | ||
| 37 | struct radeon_device *rdev = dev->dev_private; | ||
| 38 | struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); | ||
| 39 | |||
| 40 | WREG32(RADEON_OVR_CLR + radeon_crtc->crtc_offset, 0); | ||
| 41 | WREG32(RADEON_OVR_WID_LEFT_RIGHT + radeon_crtc->crtc_offset, 0); | ||
| 42 | WREG32(RADEON_OVR_WID_TOP_BOTTOM + radeon_crtc->crtc_offset, 0); | ||
| 43 | } | ||
| 44 | |||
| 33 | static void radeon_legacy_rmx_mode_set(struct drm_crtc *crtc, | 45 | static void radeon_legacy_rmx_mode_set(struct drm_crtc *crtc, |
| 34 | struct drm_display_mode *mode, | 46 | struct drm_display_mode *mode, |
| 35 | struct drm_display_mode *adjusted_mode) | 47 | struct drm_display_mode *adjusted_mode) |
| @@ -292,8 +304,7 @@ void radeon_crtc_dpms(struct drm_crtc *crtc, int mode) | |||
| 292 | uint32_t mask; | 304 | uint32_t mask; |
| 293 | 305 | ||
| 294 | if (radeon_crtc->crtc_id) | 306 | if (radeon_crtc->crtc_id) |
| 295 | mask = (RADEON_CRTC2_EN | | 307 | mask = (RADEON_CRTC2_DISP_DIS | |
| 296 | RADEON_CRTC2_DISP_DIS | | ||
| 297 | RADEON_CRTC2_VSYNC_DIS | | 308 | RADEON_CRTC2_VSYNC_DIS | |
| 298 | RADEON_CRTC2_HSYNC_DIS | | 309 | RADEON_CRTC2_HSYNC_DIS | |
| 299 | RADEON_CRTC2_DISP_REQ_EN_B); | 310 | RADEON_CRTC2_DISP_REQ_EN_B); |
| @@ -305,7 +316,7 @@ void radeon_crtc_dpms(struct drm_crtc *crtc, int mode) | |||
| 305 | switch (mode) { | 316 | switch (mode) { |
| 306 | case DRM_MODE_DPMS_ON: | 317 | case DRM_MODE_DPMS_ON: |
| 307 | if (radeon_crtc->crtc_id) | 318 | if (radeon_crtc->crtc_id) |
| 308 | WREG32_P(RADEON_CRTC2_GEN_CNTL, RADEON_CRTC2_EN, ~mask); | 319 | WREG32_P(RADEON_CRTC2_GEN_CNTL, RADEON_CRTC2_EN, ~(RADEON_CRTC2_EN | mask)); |
| 309 | else { | 320 | else { |
| 310 | WREG32_P(RADEON_CRTC_GEN_CNTL, RADEON_CRTC_EN, ~(RADEON_CRTC_EN | | 321 | WREG32_P(RADEON_CRTC_GEN_CNTL, RADEON_CRTC_EN, ~(RADEON_CRTC_EN | |
| 311 | RADEON_CRTC_DISP_REQ_EN_B)); | 322 | RADEON_CRTC_DISP_REQ_EN_B)); |
| @@ -319,7 +330,7 @@ void radeon_crtc_dpms(struct drm_crtc *crtc, int mode) | |||
| 319 | case DRM_MODE_DPMS_OFF: | 330 | case DRM_MODE_DPMS_OFF: |
| 320 | drm_vblank_pre_modeset(dev, radeon_crtc->crtc_id); | 331 | drm_vblank_pre_modeset(dev, radeon_crtc->crtc_id); |
| 321 | if (radeon_crtc->crtc_id) | 332 | if (radeon_crtc->crtc_id) |
| 322 | WREG32_P(RADEON_CRTC2_GEN_CNTL, mask, ~mask); | 333 | WREG32_P(RADEON_CRTC2_GEN_CNTL, mask, ~(RADEON_CRTC2_EN | mask)); |
| 323 | else { | 334 | else { |
| 324 | WREG32_P(RADEON_CRTC_GEN_CNTL, RADEON_CRTC_DISP_REQ_EN_B, ~(RADEON_CRTC_EN | | 335 | WREG32_P(RADEON_CRTC_GEN_CNTL, RADEON_CRTC_DISP_REQ_EN_B, ~(RADEON_CRTC_EN | |
| 325 | RADEON_CRTC_DISP_REQ_EN_B)); | 336 | RADEON_CRTC_DISP_REQ_EN_B)); |
| @@ -400,14 +411,21 @@ int radeon_crtc_set_base(struct drm_crtc *crtc, int x, int y, | |||
| 400 | struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); | 411 | struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); |
| 401 | struct radeon_framebuffer *radeon_fb; | 412 | struct radeon_framebuffer *radeon_fb; |
| 402 | struct drm_gem_object *obj; | 413 | struct drm_gem_object *obj; |
| 414 | struct radeon_bo *rbo; | ||
| 403 | uint64_t base; | 415 | uint64_t base; |
| 404 | uint32_t crtc_offset, crtc_offset_cntl, crtc_tile_x0_y0 = 0; | 416 | uint32_t crtc_offset, crtc_offset_cntl, crtc_tile_x0_y0 = 0; |
| 405 | uint32_t crtc_pitch, pitch_pixels; | 417 | uint32_t crtc_pitch, pitch_pixels; |
| 406 | uint32_t tiling_flags; | 418 | uint32_t tiling_flags; |
| 407 | int format; | 419 | int format; |
| 408 | uint32_t gen_cntl_reg, gen_cntl_val; | 420 | uint32_t gen_cntl_reg, gen_cntl_val; |
| 421 | int r; | ||
| 409 | 422 | ||
| 410 | DRM_DEBUG("\n"); | 423 | DRM_DEBUG("\n"); |
| 424 | /* no fb bound */ | ||
| 425 | if (!crtc->fb) { | ||
| 426 | DRM_DEBUG("No FB bound\n"); | ||
| 427 | return 0; | ||
| 428 | } | ||
| 411 | 429 | ||
| 412 | radeon_fb = to_radeon_framebuffer(crtc->fb); | 430 | radeon_fb = to_radeon_framebuffer(crtc->fb); |
| 413 | 431 | ||
| @@ -431,10 +449,22 @@ int radeon_crtc_set_base(struct drm_crtc *crtc, int x, int y, | |||
| 431 | return false; | 449 | return false; |
| 432 | } | 450 | } |
| 433 | 451 | ||
| 452 | /* Pin framebuffer & get tilling informations */ | ||
| 434 | obj = radeon_fb->obj; | 453 | obj = radeon_fb->obj; |
| 435 | if (radeon_gem_object_pin(obj, RADEON_GEM_DOMAIN_VRAM, &base)) { | 454 | rbo = obj->driver_private; |
| 455 | r = radeon_bo_reserve(rbo, false); | ||
| 456 | if (unlikely(r != 0)) | ||
| 457 | return r; | ||
| 458 | r = radeon_bo_pin(rbo, RADEON_GEM_DOMAIN_VRAM, &base); | ||
| 459 | if (unlikely(r != 0)) { | ||
| 460 | radeon_bo_unreserve(rbo); | ||
| 436 | return -EINVAL; | 461 | return -EINVAL; |
| 437 | } | 462 | } |
| 463 | radeon_bo_get_tiling_flags(rbo, &tiling_flags, NULL); | ||
| 464 | radeon_bo_unreserve(rbo); | ||
| 465 | if (tiling_flags & RADEON_TILING_MICRO) | ||
| 466 | DRM_ERROR("trying to scanout microtiled buffer\n"); | ||
| 467 | |||
| 438 | /* if scanout was in GTT this really wouldn't work */ | 468 | /* if scanout was in GTT this really wouldn't work */ |
| 439 | /* crtc offset is from display base addr not FB location */ | 469 | /* crtc offset is from display base addr not FB location */ |
| 440 | radeon_crtc->legacy_display_base_addr = rdev->mc.vram_location; | 470 | radeon_crtc->legacy_display_base_addr = rdev->mc.vram_location; |
| @@ -449,10 +479,6 @@ int radeon_crtc_set_base(struct drm_crtc *crtc, int x, int y, | |||
| 449 | (crtc->fb->bits_per_pixel * 8)); | 479 | (crtc->fb->bits_per_pixel * 8)); |
| 450 | crtc_pitch |= crtc_pitch << 16; | 480 | crtc_pitch |= crtc_pitch << 16; |
| 451 | 481 | ||
| 452 | radeon_object_get_tiling_flags(obj->driver_private, | ||
| 453 | &tiling_flags, NULL); | ||
| 454 | if (tiling_flags & RADEON_TILING_MICRO) | ||
| 455 | DRM_ERROR("trying to scanout microtiled buffer\n"); | ||
| 456 | 482 | ||
| 457 | if (tiling_flags & RADEON_TILING_MACRO) { | 483 | if (tiling_flags & RADEON_TILING_MACRO) { |
| 458 | if (ASIC_IS_R300(rdev)) | 484 | if (ASIC_IS_R300(rdev)) |
| @@ -530,7 +556,12 @@ int radeon_crtc_set_base(struct drm_crtc *crtc, int x, int y, | |||
| 530 | 556 | ||
| 531 | if (old_fb && old_fb != crtc->fb) { | 557 | if (old_fb && old_fb != crtc->fb) { |
| 532 | radeon_fb = to_radeon_framebuffer(old_fb); | 558 | radeon_fb = to_radeon_framebuffer(old_fb); |
| 533 | radeon_gem_object_unpin(radeon_fb->obj); | 559 | rbo = radeon_fb->obj->driver_private; |
| 560 | r = radeon_bo_reserve(rbo, false); | ||
| 561 | if (unlikely(r != 0)) | ||
| 562 | return r; | ||
| 563 | radeon_bo_unpin(rbo); | ||
| 564 | radeon_bo_unreserve(rbo); | ||
| 534 | } | 565 | } |
| 535 | 566 | ||
| 536 | /* Bytes per pixel may have changed */ | 567 | /* Bytes per pixel may have changed */ |
| @@ -642,12 +673,8 @@ static bool radeon_set_crtc_timing(struct drm_crtc *crtc, struct drm_display_mod | |||
| 642 | uint32_t crtc2_gen_cntl; | 673 | uint32_t crtc2_gen_cntl; |
| 643 | uint32_t disp2_merge_cntl; | 674 | uint32_t disp2_merge_cntl; |
| 644 | 675 | ||
| 645 | /* check to see if TV DAC is enabled for another crtc and keep it enabled */ | 676 | /* if TV DAC is enabled for another crtc and keep it enabled */ |
| 646 | if (RREG32(RADEON_CRTC2_GEN_CNTL) & RADEON_CRTC2_CRT2_ON) | 677 | crtc2_gen_cntl = RREG32(RADEON_CRTC2_GEN_CNTL) & 0x00718080; |
| 647 | crtc2_gen_cntl = RADEON_CRTC2_CRT2_ON; | ||
| 648 | else | ||
| 649 | crtc2_gen_cntl = 0; | ||
| 650 | |||
| 651 | crtc2_gen_cntl |= ((format << 8) | 678 | crtc2_gen_cntl |= ((format << 8) |
| 652 | | RADEON_CRTC2_VSYNC_DIS | 679 | | RADEON_CRTC2_VSYNC_DIS |
| 653 | | RADEON_CRTC2_HSYNC_DIS | 680 | | RADEON_CRTC2_HSYNC_DIS |
| @@ -676,7 +703,8 @@ static bool radeon_set_crtc_timing(struct drm_crtc *crtc, struct drm_display_mod | |||
| 676 | uint32_t crtc_ext_cntl; | 703 | uint32_t crtc_ext_cntl; |
| 677 | uint32_t disp_merge_cntl; | 704 | uint32_t disp_merge_cntl; |
| 678 | 705 | ||
| 679 | crtc_gen_cntl = (RADEON_CRTC_EXT_DISP_EN | 706 | crtc_gen_cntl = RREG32(RADEON_CRTC_GEN_CNTL) & 0x00718000; |
| 707 | crtc_gen_cntl |= (RADEON_CRTC_EXT_DISP_EN | ||
| 680 | | (format << 8) | 708 | | (format << 8) |
| 681 | | RADEON_CRTC_DISP_REQ_EN_B | 709 | | RADEON_CRTC_DISP_REQ_EN_B |
| 682 | | ((mode->flags & DRM_MODE_FLAG_DBLSCAN) | 710 | | ((mode->flags & DRM_MODE_FLAG_DBLSCAN) |
| @@ -779,15 +807,17 @@ static void radeon_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode) | |||
| 779 | if (encoder->encoder_type != DRM_MODE_ENCODER_DAC) | 807 | if (encoder->encoder_type != DRM_MODE_ENCODER_DAC) |
| 780 | pll_flags |= RADEON_PLL_NO_ODD_POST_DIV; | 808 | pll_flags |= RADEON_PLL_NO_ODD_POST_DIV; |
| 781 | if (encoder->encoder_type == DRM_MODE_ENCODER_LVDS) { | 809 | if (encoder->encoder_type == DRM_MODE_ENCODER_LVDS) { |
| 782 | struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); | 810 | if (!rdev->is_atom_bios) { |
| 783 | struct radeon_encoder_lvds *lvds = (struct radeon_encoder_lvds *)radeon_encoder->enc_priv; | 811 | struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); |
| 784 | if (lvds) { | 812 | struct radeon_encoder_lvds *lvds = (struct radeon_encoder_lvds *)radeon_encoder->enc_priv; |
| 785 | if (lvds->use_bios_dividers) { | 813 | if (lvds) { |
| 786 | pll_ref_div = lvds->panel_ref_divider; | 814 | if (lvds->use_bios_dividers) { |
| 787 | pll_fb_post_div = (lvds->panel_fb_divider | | 815 | pll_ref_div = lvds->panel_ref_divider; |
| 788 | (lvds->panel_post_divider << 16)); | 816 | pll_fb_post_div = (lvds->panel_fb_divider | |
| 789 | htotal_cntl = 0; | 817 | (lvds->panel_post_divider << 16)); |
| 790 | use_bios_divs = true; | 818 | htotal_cntl = 0; |
| 819 | use_bios_divs = true; | ||
| 820 | } | ||
| 791 | } | 821 | } |
| 792 | } | 822 | } |
| 793 | pll_flags |= RADEON_PLL_USE_REF_DIV; | 823 | pll_flags |= RADEON_PLL_USE_REF_DIV; |
| @@ -1027,6 +1057,7 @@ static int radeon_crtc_mode_set(struct drm_crtc *crtc, | |||
| 1027 | radeon_crtc_set_base(crtc, x, y, old_fb); | 1057 | radeon_crtc_set_base(crtc, x, y, old_fb); |
| 1028 | radeon_set_crtc_timing(crtc, adjusted_mode); | 1058 | radeon_set_crtc_timing(crtc, adjusted_mode); |
| 1029 | radeon_set_pll(crtc, adjusted_mode); | 1059 | radeon_set_pll(crtc, adjusted_mode); |
| 1060 | radeon_overscan_setup(crtc, adjusted_mode); | ||
| 1030 | if (radeon_crtc->crtc_id == 0) { | 1061 | if (radeon_crtc->crtc_id == 0) { |
| 1031 | radeon_legacy_rmx_mode_set(crtc, mode, adjusted_mode); | 1062 | radeon_legacy_rmx_mode_set(crtc, mode, adjusted_mode); |
| 1032 | } else { | 1063 | } else { |
| @@ -1042,12 +1073,29 @@ static int radeon_crtc_mode_set(struct drm_crtc *crtc, | |||
| 1042 | 1073 | ||
| 1043 | static void radeon_crtc_prepare(struct drm_crtc *crtc) | 1074 | static void radeon_crtc_prepare(struct drm_crtc *crtc) |
| 1044 | { | 1075 | { |
| 1045 | radeon_crtc_dpms(crtc, DRM_MODE_DPMS_OFF); | 1076 | struct drm_device *dev = crtc->dev; |
| 1077 | struct drm_crtc *crtci; | ||
| 1078 | |||
| 1079 | /* | ||
| 1080 | * The hardware wedges sometimes if you reconfigure one CRTC | ||
| 1081 | * whilst another is running (see fdo bug #24611). | ||
| 1082 | */ | ||
| 1083 | list_for_each_entry(crtci, &dev->mode_config.crtc_list, head) | ||
| 1084 | radeon_crtc_dpms(crtci, DRM_MODE_DPMS_OFF); | ||
| 1046 | } | 1085 | } |
| 1047 | 1086 | ||
| 1048 | static void radeon_crtc_commit(struct drm_crtc *crtc) | 1087 | static void radeon_crtc_commit(struct drm_crtc *crtc) |
| 1049 | { | 1088 | { |
| 1050 | radeon_crtc_dpms(crtc, DRM_MODE_DPMS_ON); | 1089 | struct drm_device *dev = crtc->dev; |
| 1090 | struct drm_crtc *crtci; | ||
| 1091 | |||
| 1092 | /* | ||
| 1093 | * Reenable the CRTCs that should be running. | ||
| 1094 | */ | ||
| 1095 | list_for_each_entry(crtci, &dev->mode_config.crtc_list, head) { | ||
| 1096 | if (crtci->enabled) | ||
| 1097 | radeon_crtc_dpms(crtci, DRM_MODE_DPMS_ON); | ||
| 1098 | } | ||
| 1051 | } | 1099 | } |
| 1052 | 1100 | ||
| 1053 | static const struct drm_crtc_helper_funcs legacy_helper_funcs = { | 1101 | static const struct drm_crtc_helper_funcs legacy_helper_funcs = { |
diff --git a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c index 00382122869b..df00515e81fa 100644 --- a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c +++ b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c | |||
| @@ -136,7 +136,14 @@ static void radeon_legacy_lvds_mode_set(struct drm_encoder *encoder, | |||
| 136 | lvds_pll_cntl &= ~RADEON_LVDS_PLL_EN; | 136 | lvds_pll_cntl &= ~RADEON_LVDS_PLL_EN; |
| 137 | 137 | ||
| 138 | lvds_ss_gen_cntl = RREG32(RADEON_LVDS_SS_GEN_CNTL); | 138 | lvds_ss_gen_cntl = RREG32(RADEON_LVDS_SS_GEN_CNTL); |
| 139 | if ((!rdev->is_atom_bios)) { | 139 | if (rdev->is_atom_bios) { |
| 140 | /* LVDS_GEN_CNTL parameters are computed in LVDSEncoderControl | ||
| 141 | * need to call that on resume to set up the reg properly. | ||
| 142 | */ | ||
| 143 | radeon_encoder->pixel_clock = adjusted_mode->clock; | ||
| 144 | atombios_digital_setup(encoder, PANEL_ENCODER_ACTION_ENABLE); | ||
| 145 | lvds_gen_cntl = RREG32(RADEON_LVDS_GEN_CNTL); | ||
| 146 | } else { | ||
| 140 | struct radeon_encoder_lvds *lvds = (struct radeon_encoder_lvds *)radeon_encoder->enc_priv; | 147 | struct radeon_encoder_lvds *lvds = (struct radeon_encoder_lvds *)radeon_encoder->enc_priv; |
| 141 | if (lvds) { | 148 | if (lvds) { |
| 142 | DRM_DEBUG("bios LVDS_GEN_CNTL: 0x%x\n", lvds->lvds_gen_cntl); | 149 | DRM_DEBUG("bios LVDS_GEN_CNTL: 0x%x\n", lvds->lvds_gen_cntl); |
| @@ -147,8 +154,7 @@ static void radeon_legacy_lvds_mode_set(struct drm_encoder *encoder, | |||
| 147 | (lvds->panel_blon_delay << RADEON_LVDS_PWRSEQ_DELAY2_SHIFT)); | 154 | (lvds->panel_blon_delay << RADEON_LVDS_PWRSEQ_DELAY2_SHIFT)); |
| 148 | } else | 155 | } else |
| 149 | lvds_gen_cntl = RREG32(RADEON_LVDS_GEN_CNTL); | 156 | lvds_gen_cntl = RREG32(RADEON_LVDS_GEN_CNTL); |
| 150 | } else | 157 | } |
| 151 | lvds_gen_cntl = RREG32(RADEON_LVDS_GEN_CNTL); | ||
| 152 | lvds_gen_cntl |= RADEON_LVDS_DISPLAY_DIS; | 158 | lvds_gen_cntl |= RADEON_LVDS_DISPLAY_DIS; |
| 153 | lvds_gen_cntl &= ~(RADEON_LVDS_ON | | 159 | lvds_gen_cntl &= ~(RADEON_LVDS_ON | |
| 154 | RADEON_LVDS_BLON | | 160 | RADEON_LVDS_BLON | |
| @@ -184,9 +190,9 @@ static void radeon_legacy_lvds_mode_set(struct drm_encoder *encoder, | |||
| 184 | radeon_combios_encoder_crtc_scratch_regs(encoder, radeon_crtc->crtc_id); | 190 | radeon_combios_encoder_crtc_scratch_regs(encoder, radeon_crtc->crtc_id); |
| 185 | } | 191 | } |
| 186 | 192 | ||
| 187 | static bool radeon_legacy_lvds_mode_fixup(struct drm_encoder *encoder, | 193 | static bool radeon_legacy_mode_fixup(struct drm_encoder *encoder, |
| 188 | struct drm_display_mode *mode, | 194 | struct drm_display_mode *mode, |
| 189 | struct drm_display_mode *adjusted_mode) | 195 | struct drm_display_mode *adjusted_mode) |
| 190 | { | 196 | { |
| 191 | struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); | 197 | struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); |
| 192 | 198 | ||
| @@ -194,15 +200,22 @@ static bool radeon_legacy_lvds_mode_fixup(struct drm_encoder *encoder, | |||
| 194 | radeon_encoder_set_active_device(encoder); | 200 | radeon_encoder_set_active_device(encoder); |
| 195 | drm_mode_set_crtcinfo(adjusted_mode, 0); | 201 | drm_mode_set_crtcinfo(adjusted_mode, 0); |
| 196 | 202 | ||
| 197 | if (radeon_encoder->rmx_type != RMX_OFF) | 203 | /* get the native mode for LVDS */ |
| 198 | radeon_rmx_mode_fixup(encoder, mode, adjusted_mode); | 204 | if (radeon_encoder->active_device & (ATOM_DEVICE_LCD_SUPPORT)) { |
| 205 | struct drm_display_mode *native_mode = &radeon_encoder->native_mode; | ||
| 206 | int mode_id = adjusted_mode->base.id; | ||
| 207 | *adjusted_mode = *native_mode; | ||
| 208 | adjusted_mode->hdisplay = mode->hdisplay; | ||
| 209 | adjusted_mode->vdisplay = mode->vdisplay; | ||
| 210 | adjusted_mode->base.id = mode_id; | ||
| 211 | } | ||
| 199 | 212 | ||
| 200 | return true; | 213 | return true; |
| 201 | } | 214 | } |
| 202 | 215 | ||
| 203 | static const struct drm_encoder_helper_funcs radeon_legacy_lvds_helper_funcs = { | 216 | static const struct drm_encoder_helper_funcs radeon_legacy_lvds_helper_funcs = { |
| 204 | .dpms = radeon_legacy_lvds_dpms, | 217 | .dpms = radeon_legacy_lvds_dpms, |
| 205 | .mode_fixup = radeon_legacy_lvds_mode_fixup, | 218 | .mode_fixup = radeon_legacy_mode_fixup, |
| 206 | .prepare = radeon_legacy_lvds_prepare, | 219 | .prepare = radeon_legacy_lvds_prepare, |
| 207 | .mode_set = radeon_legacy_lvds_mode_set, | 220 | .mode_set = radeon_legacy_lvds_mode_set, |
| 208 | .commit = radeon_legacy_lvds_commit, | 221 | .commit = radeon_legacy_lvds_commit, |
| @@ -214,17 +227,6 @@ static const struct drm_encoder_funcs radeon_legacy_lvds_enc_funcs = { | |||
| 214 | .destroy = radeon_enc_destroy, | 227 | .destroy = radeon_enc_destroy, |
| 215 | }; | 228 | }; |
| 216 | 229 | ||
| 217 | static bool radeon_legacy_primary_dac_mode_fixup(struct drm_encoder *encoder, | ||
| 218 | struct drm_display_mode *mode, | ||
| 219 | struct drm_display_mode *adjusted_mode) | ||
| 220 | { | ||
| 221 | /* set the active encoder to connector routing */ | ||
| 222 | radeon_encoder_set_active_device(encoder); | ||
| 223 | drm_mode_set_crtcinfo(adjusted_mode, 0); | ||
| 224 | |||
| 225 | return true; | ||
| 226 | } | ||
| 227 | |||
| 228 | static void radeon_legacy_primary_dac_dpms(struct drm_encoder *encoder, int mode) | 230 | static void radeon_legacy_primary_dac_dpms(struct drm_encoder *encoder, int mode) |
| 229 | { | 231 | { |
| 230 | struct drm_device *dev = encoder->dev; | 232 | struct drm_device *dev = encoder->dev; |
| @@ -410,7 +412,7 @@ static enum drm_connector_status radeon_legacy_primary_dac_detect(struct drm_enc | |||
| 410 | 412 | ||
| 411 | static const struct drm_encoder_helper_funcs radeon_legacy_primary_dac_helper_funcs = { | 413 | static const struct drm_encoder_helper_funcs radeon_legacy_primary_dac_helper_funcs = { |
| 412 | .dpms = radeon_legacy_primary_dac_dpms, | 414 | .dpms = radeon_legacy_primary_dac_dpms, |
| 413 | .mode_fixup = radeon_legacy_primary_dac_mode_fixup, | 415 | .mode_fixup = radeon_legacy_mode_fixup, |
| 414 | .prepare = radeon_legacy_primary_dac_prepare, | 416 | .prepare = radeon_legacy_primary_dac_prepare, |
| 415 | .mode_set = radeon_legacy_primary_dac_mode_set, | 417 | .mode_set = radeon_legacy_primary_dac_mode_set, |
| 416 | .commit = radeon_legacy_primary_dac_commit, | 418 | .commit = radeon_legacy_primary_dac_commit, |
| @@ -423,16 +425,6 @@ static const struct drm_encoder_funcs radeon_legacy_primary_dac_enc_funcs = { | |||
| 423 | .destroy = radeon_enc_destroy, | 425 | .destroy = radeon_enc_destroy, |
| 424 | }; | 426 | }; |
| 425 | 427 | ||
| 426 | static bool radeon_legacy_tmds_int_mode_fixup(struct drm_encoder *encoder, | ||
| 427 | struct drm_display_mode *mode, | ||
| 428 | struct drm_display_mode *adjusted_mode) | ||
| 429 | { | ||
| 430 | |||
| 431 | drm_mode_set_crtcinfo(adjusted_mode, 0); | ||
| 432 | |||
| 433 | return true; | ||
| 434 | } | ||
| 435 | |||
| 436 | static void radeon_legacy_tmds_int_dpms(struct drm_encoder *encoder, int mode) | 428 | static void radeon_legacy_tmds_int_dpms(struct drm_encoder *encoder, int mode) |
| 437 | { | 429 | { |
| 438 | struct drm_device *dev = encoder->dev; | 430 | struct drm_device *dev = encoder->dev; |
| @@ -584,7 +576,7 @@ static void radeon_legacy_tmds_int_mode_set(struct drm_encoder *encoder, | |||
| 584 | 576 | ||
| 585 | static const struct drm_encoder_helper_funcs radeon_legacy_tmds_int_helper_funcs = { | 577 | static const struct drm_encoder_helper_funcs radeon_legacy_tmds_int_helper_funcs = { |
| 586 | .dpms = radeon_legacy_tmds_int_dpms, | 578 | .dpms = radeon_legacy_tmds_int_dpms, |
| 587 | .mode_fixup = radeon_legacy_tmds_int_mode_fixup, | 579 | .mode_fixup = radeon_legacy_mode_fixup, |
| 588 | .prepare = radeon_legacy_tmds_int_prepare, | 580 | .prepare = radeon_legacy_tmds_int_prepare, |
| 589 | .mode_set = radeon_legacy_tmds_int_mode_set, | 581 | .mode_set = radeon_legacy_tmds_int_mode_set, |
| 590 | .commit = radeon_legacy_tmds_int_commit, | 582 | .commit = radeon_legacy_tmds_int_commit, |
| @@ -596,17 +588,6 @@ static const struct drm_encoder_funcs radeon_legacy_tmds_int_enc_funcs = { | |||
| 596 | .destroy = radeon_enc_destroy, | 588 | .destroy = radeon_enc_destroy, |
| 597 | }; | 589 | }; |
| 598 | 590 | ||
| 599 | static bool radeon_legacy_tmds_ext_mode_fixup(struct drm_encoder *encoder, | ||
| 600 | struct drm_display_mode *mode, | ||
| 601 | struct drm_display_mode *adjusted_mode) | ||
| 602 | { | ||
| 603 | /* set the active encoder to connector routing */ | ||
| 604 | radeon_encoder_set_active_device(encoder); | ||
| 605 | drm_mode_set_crtcinfo(adjusted_mode, 0); | ||
| 606 | |||
| 607 | return true; | ||
| 608 | } | ||
| 609 | |||
| 610 | static void radeon_legacy_tmds_ext_dpms(struct drm_encoder *encoder, int mode) | 591 | static void radeon_legacy_tmds_ext_dpms(struct drm_encoder *encoder, int mode) |
| 611 | { | 592 | { |
| 612 | struct drm_device *dev = encoder->dev; | 593 | struct drm_device *dev = encoder->dev; |
| @@ -697,6 +678,8 @@ static void radeon_legacy_tmds_ext_mode_set(struct drm_encoder *encoder, | |||
| 697 | /*if (mode->clock > 165000) | 678 | /*if (mode->clock > 165000) |
| 698 | fp2_gen_cntl |= R300_FP2_DVO_DUAL_CHANNEL_EN;*/ | 679 | fp2_gen_cntl |= R300_FP2_DVO_DUAL_CHANNEL_EN;*/ |
| 699 | } | 680 | } |
| 681 | if (!radeon_combios_external_tmds_setup(encoder)) | ||
| 682 | radeon_external_tmds_setup(encoder); | ||
| 700 | } | 683 | } |
| 701 | 684 | ||
| 702 | if (radeon_crtc->crtc_id == 0) { | 685 | if (radeon_crtc->crtc_id == 0) { |
| @@ -724,9 +707,22 @@ static void radeon_legacy_tmds_ext_mode_set(struct drm_encoder *encoder, | |||
| 724 | radeon_combios_encoder_crtc_scratch_regs(encoder, radeon_crtc->crtc_id); | 707 | radeon_combios_encoder_crtc_scratch_regs(encoder, radeon_crtc->crtc_id); |
| 725 | } | 708 | } |
| 726 | 709 | ||
| 710 | static void radeon_ext_tmds_enc_destroy(struct drm_encoder *encoder) | ||
| 711 | { | ||
| 712 | struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); | ||
| 713 | struct radeon_encoder_ext_tmds *tmds = radeon_encoder->enc_priv; | ||
| 714 | if (tmds) { | ||
| 715 | if (tmds->i2c_bus) | ||
| 716 | radeon_i2c_destroy(tmds->i2c_bus); | ||
| 717 | } | ||
| 718 | kfree(radeon_encoder->enc_priv); | ||
| 719 | drm_encoder_cleanup(encoder); | ||
| 720 | kfree(radeon_encoder); | ||
| 721 | } | ||
| 722 | |||
| 727 | static const struct drm_encoder_helper_funcs radeon_legacy_tmds_ext_helper_funcs = { | 723 | static const struct drm_encoder_helper_funcs radeon_legacy_tmds_ext_helper_funcs = { |
| 728 | .dpms = radeon_legacy_tmds_ext_dpms, | 724 | .dpms = radeon_legacy_tmds_ext_dpms, |
| 729 | .mode_fixup = radeon_legacy_tmds_ext_mode_fixup, | 725 | .mode_fixup = radeon_legacy_mode_fixup, |
| 730 | .prepare = radeon_legacy_tmds_ext_prepare, | 726 | .prepare = radeon_legacy_tmds_ext_prepare, |
| 731 | .mode_set = radeon_legacy_tmds_ext_mode_set, | 727 | .mode_set = radeon_legacy_tmds_ext_mode_set, |
| 732 | .commit = radeon_legacy_tmds_ext_commit, | 728 | .commit = radeon_legacy_tmds_ext_commit, |
| @@ -735,20 +731,9 @@ static const struct drm_encoder_helper_funcs radeon_legacy_tmds_ext_helper_funcs | |||
| 735 | 731 | ||
| 736 | 732 | ||
| 737 | static const struct drm_encoder_funcs radeon_legacy_tmds_ext_enc_funcs = { | 733 | static const struct drm_encoder_funcs radeon_legacy_tmds_ext_enc_funcs = { |
| 738 | .destroy = radeon_enc_destroy, | 734 | .destroy = radeon_ext_tmds_enc_destroy, |
| 739 | }; | 735 | }; |
| 740 | 736 | ||
| 741 | static bool radeon_legacy_tv_dac_mode_fixup(struct drm_encoder *encoder, | ||
| 742 | struct drm_display_mode *mode, | ||
| 743 | struct drm_display_mode *adjusted_mode) | ||
| 744 | { | ||
| 745 | /* set the active encoder to connector routing */ | ||
| 746 | radeon_encoder_set_active_device(encoder); | ||
| 747 | drm_mode_set_crtcinfo(adjusted_mode, 0); | ||
| 748 | |||
| 749 | return true; | ||
| 750 | } | ||
| 751 | |||
| 752 | static void radeon_legacy_tv_dac_dpms(struct drm_encoder *encoder, int mode) | 737 | static void radeon_legacy_tv_dac_dpms(struct drm_encoder *encoder, int mode) |
| 753 | { | 738 | { |
| 754 | struct drm_device *dev = encoder->dev; | 739 | struct drm_device *dev = encoder->dev; |
| @@ -1265,7 +1250,7 @@ static enum drm_connector_status radeon_legacy_tv_dac_detect(struct drm_encoder | |||
| 1265 | 1250 | ||
| 1266 | static const struct drm_encoder_helper_funcs radeon_legacy_tv_dac_helper_funcs = { | 1251 | static const struct drm_encoder_helper_funcs radeon_legacy_tv_dac_helper_funcs = { |
| 1267 | .dpms = radeon_legacy_tv_dac_dpms, | 1252 | .dpms = radeon_legacy_tv_dac_dpms, |
| 1268 | .mode_fixup = radeon_legacy_tv_dac_mode_fixup, | 1253 | .mode_fixup = radeon_legacy_mode_fixup, |
| 1269 | .prepare = radeon_legacy_tv_dac_prepare, | 1254 | .prepare = radeon_legacy_tv_dac_prepare, |
| 1270 | .mode_set = radeon_legacy_tv_dac_mode_set, | 1255 | .mode_set = radeon_legacy_tv_dac_mode_set, |
| 1271 | .commit = radeon_legacy_tv_dac_commit, | 1256 | .commit = radeon_legacy_tv_dac_commit, |
| @@ -1302,6 +1287,29 @@ static struct radeon_encoder_int_tmds *radeon_legacy_get_tmds_info(struct radeon | |||
| 1302 | return tmds; | 1287 | return tmds; |
| 1303 | } | 1288 | } |
| 1304 | 1289 | ||
| 1290 | static struct radeon_encoder_ext_tmds *radeon_legacy_get_ext_tmds_info(struct radeon_encoder *encoder) | ||
| 1291 | { | ||
| 1292 | struct drm_device *dev = encoder->base.dev; | ||
| 1293 | struct radeon_device *rdev = dev->dev_private; | ||
| 1294 | struct radeon_encoder_ext_tmds *tmds = NULL; | ||
| 1295 | bool ret; | ||
| 1296 | |||
| 1297 | if (rdev->is_atom_bios) | ||
| 1298 | return NULL; | ||
| 1299 | |||
| 1300 | tmds = kzalloc(sizeof(struct radeon_encoder_ext_tmds), GFP_KERNEL); | ||
| 1301 | |||
| 1302 | if (!tmds) | ||
| 1303 | return NULL; | ||
| 1304 | |||
| 1305 | ret = radeon_legacy_get_ext_tmds_info_from_combios(encoder, tmds); | ||
| 1306 | |||
| 1307 | if (ret == false) | ||
| 1308 | radeon_legacy_get_ext_tmds_info_from_table(encoder, tmds); | ||
| 1309 | |||
| 1310 | return tmds; | ||
| 1311 | } | ||
| 1312 | |||
| 1305 | void | 1313 | void |
| 1306 | radeon_add_legacy_encoder(struct drm_device *dev, uint32_t encoder_id, uint32_t supported_device) | 1314 | radeon_add_legacy_encoder(struct drm_device *dev, uint32_t encoder_id, uint32_t supported_device) |
| 1307 | { | 1315 | { |
| @@ -1329,7 +1337,6 @@ radeon_add_legacy_encoder(struct drm_device *dev, uint32_t encoder_id, uint32_t | |||
| 1329 | encoder->possible_crtcs = 0x1; | 1337 | encoder->possible_crtcs = 0x1; |
| 1330 | else | 1338 | else |
| 1331 | encoder->possible_crtcs = 0x3; | 1339 | encoder->possible_crtcs = 0x3; |
| 1332 | encoder->possible_clones = 0; | ||
| 1333 | 1340 | ||
| 1334 | radeon_encoder->enc_priv = NULL; | 1341 | radeon_encoder->enc_priv = NULL; |
| 1335 | 1342 | ||
| @@ -1373,7 +1380,7 @@ radeon_add_legacy_encoder(struct drm_device *dev, uint32_t encoder_id, uint32_t | |||
| 1373 | drm_encoder_init(dev, encoder, &radeon_legacy_tmds_ext_enc_funcs, DRM_MODE_ENCODER_TMDS); | 1380 | drm_encoder_init(dev, encoder, &radeon_legacy_tmds_ext_enc_funcs, DRM_MODE_ENCODER_TMDS); |
| 1374 | drm_encoder_helper_add(encoder, &radeon_legacy_tmds_ext_helper_funcs); | 1381 | drm_encoder_helper_add(encoder, &radeon_legacy_tmds_ext_helper_funcs); |
| 1375 | if (!rdev->is_atom_bios) | 1382 | if (!rdev->is_atom_bios) |
| 1376 | radeon_combios_get_ext_tmds_info(radeon_encoder); | 1383 | radeon_encoder->enc_priv = radeon_legacy_get_ext_tmds_info(radeon_encoder); |
| 1377 | break; | 1384 | break; |
| 1378 | } | 1385 | } |
| 1379 | } | 1386 | } |
diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h index ace726aa0d76..135693d5437e 100644 --- a/drivers/gpu/drm/radeon/radeon_mode.h +++ b/drivers/gpu/drm/radeon/radeon_mode.h | |||
| @@ -89,24 +89,38 @@ enum radeon_tv_std { | |||
| 89 | TV_STD_PAL_CN, | 89 | TV_STD_PAL_CN, |
| 90 | }; | 90 | }; |
| 91 | 91 | ||
| 92 | /* radeon gpio-based i2c | ||
| 93 | * 1. "mask" reg and bits | ||
| 94 | * grabs the gpio pins for software use | ||
| 95 | * 0=not held 1=held | ||
| 96 | * 2. "a" reg and bits | ||
| 97 | * output pin value | ||
| 98 | * 0=low 1=high | ||
| 99 | * 3. "en" reg and bits | ||
| 100 | * sets the pin direction | ||
| 101 | * 0=input 1=output | ||
| 102 | * 4. "y" reg and bits | ||
| 103 | * input pin value | ||
| 104 | * 0=low 1=high | ||
| 105 | */ | ||
| 92 | struct radeon_i2c_bus_rec { | 106 | struct radeon_i2c_bus_rec { |
| 93 | bool valid; | 107 | bool valid; |
| 94 | uint32_t mask_clk_reg; | 108 | uint32_t mask_clk_reg; |
| 95 | uint32_t mask_data_reg; | 109 | uint32_t mask_data_reg; |
| 96 | uint32_t a_clk_reg; | 110 | uint32_t a_clk_reg; |
| 97 | uint32_t a_data_reg; | 111 | uint32_t a_data_reg; |
| 98 | uint32_t put_clk_reg; | 112 | uint32_t en_clk_reg; |
| 99 | uint32_t put_data_reg; | 113 | uint32_t en_data_reg; |
| 100 | uint32_t get_clk_reg; | 114 | uint32_t y_clk_reg; |
| 101 | uint32_t get_data_reg; | 115 | uint32_t y_data_reg; |
| 102 | uint32_t mask_clk_mask; | 116 | uint32_t mask_clk_mask; |
| 103 | uint32_t mask_data_mask; | 117 | uint32_t mask_data_mask; |
| 104 | uint32_t put_clk_mask; | ||
| 105 | uint32_t put_data_mask; | ||
| 106 | uint32_t get_clk_mask; | ||
| 107 | uint32_t get_data_mask; | ||
| 108 | uint32_t a_clk_mask; | 118 | uint32_t a_clk_mask; |
| 109 | uint32_t a_data_mask; | 119 | uint32_t a_data_mask; |
| 120 | uint32_t en_clk_mask; | ||
| 121 | uint32_t en_data_mask; | ||
| 122 | uint32_t y_clk_mask; | ||
| 123 | uint32_t y_data_mask; | ||
| 110 | }; | 124 | }; |
| 111 | 125 | ||
| 112 | struct radeon_tmds_pll { | 126 | struct radeon_tmds_pll { |
| @@ -170,6 +184,11 @@ enum radeon_connector_table { | |||
| 170 | CT_EMAC, | 184 | CT_EMAC, |
| 171 | }; | 185 | }; |
| 172 | 186 | ||
| 187 | enum radeon_dvo_chip { | ||
| 188 | DVO_SIL164, | ||
| 189 | DVO_SIL1178, | ||
| 190 | }; | ||
| 191 | |||
| 173 | struct radeon_mode_info { | 192 | struct radeon_mode_info { |
| 174 | struct atom_context *atom_context; | 193 | struct atom_context *atom_context; |
| 175 | struct card_info *atom_card_info; | 194 | struct card_info *atom_card_info; |
| @@ -261,6 +280,13 @@ struct radeon_encoder_int_tmds { | |||
| 261 | struct radeon_tmds_pll tmds_pll[4]; | 280 | struct radeon_tmds_pll tmds_pll[4]; |
| 262 | }; | 281 | }; |
| 263 | 282 | ||
| 283 | struct radeon_encoder_ext_tmds { | ||
| 284 | /* tmds over dvo */ | ||
| 285 | struct radeon_i2c_chan *i2c_bus; | ||
| 286 | uint8_t slave_addr; | ||
| 287 | enum radeon_dvo_chip dvo_chip; | ||
| 288 | }; | ||
| 289 | |||
| 264 | /* spread spectrum */ | 290 | /* spread spectrum */ |
| 265 | struct radeon_atom_ss { | 291 | struct radeon_atom_ss { |
| 266 | uint16_t percentage; | 292 | uint16_t percentage; |
| @@ -329,6 +355,14 @@ extern struct radeon_i2c_chan *radeon_i2c_create(struct drm_device *dev, | |||
| 329 | struct radeon_i2c_bus_rec *rec, | 355 | struct radeon_i2c_bus_rec *rec, |
| 330 | const char *name); | 356 | const char *name); |
| 331 | extern void radeon_i2c_destroy(struct radeon_i2c_chan *i2c); | 357 | extern void radeon_i2c_destroy(struct radeon_i2c_chan *i2c); |
| 358 | extern void radeon_i2c_sw_get_byte(struct radeon_i2c_chan *i2c_bus, | ||
| 359 | u8 slave_addr, | ||
| 360 | u8 addr, | ||
| 361 | u8 *val); | ||
| 362 | extern void radeon_i2c_sw_put_byte(struct radeon_i2c_chan *i2c, | ||
| 363 | u8 slave_addr, | ||
| 364 | u8 addr, | ||
| 365 | u8 val); | ||
| 332 | extern bool radeon_ddc_probe(struct radeon_connector *radeon_connector); | 366 | extern bool radeon_ddc_probe(struct radeon_connector *radeon_connector); |
| 333 | extern int radeon_ddc_get_modes(struct radeon_connector *radeon_connector); | 367 | extern int radeon_ddc_get_modes(struct radeon_connector *radeon_connector); |
| 334 | 368 | ||
| @@ -343,12 +377,15 @@ extern void radeon_compute_pll(struct radeon_pll *pll, | |||
| 343 | uint32_t *post_div_p, | 377 | uint32_t *post_div_p, |
| 344 | int flags); | 378 | int flags); |
| 345 | 379 | ||
| 380 | extern void radeon_setup_encoder_clones(struct drm_device *dev); | ||
| 381 | |||
| 346 | struct drm_encoder *radeon_encoder_legacy_lvds_add(struct drm_device *dev, int bios_index); | 382 | struct drm_encoder *radeon_encoder_legacy_lvds_add(struct drm_device *dev, int bios_index); |
| 347 | struct drm_encoder *radeon_encoder_legacy_primary_dac_add(struct drm_device *dev, int bios_index, int with_tv); | 383 | struct drm_encoder *radeon_encoder_legacy_primary_dac_add(struct drm_device *dev, int bios_index, int with_tv); |
| 348 | struct drm_encoder *radeon_encoder_legacy_tv_dac_add(struct drm_device *dev, int bios_index, int with_tv); | 384 | struct drm_encoder *radeon_encoder_legacy_tv_dac_add(struct drm_device *dev, int bios_index, int with_tv); |
| 349 | struct drm_encoder *radeon_encoder_legacy_tmds_int_add(struct drm_device *dev, int bios_index); | 385 | struct drm_encoder *radeon_encoder_legacy_tmds_int_add(struct drm_device *dev, int bios_index); |
| 350 | struct drm_encoder *radeon_encoder_legacy_tmds_ext_add(struct drm_device *dev, int bios_index); | 386 | struct drm_encoder *radeon_encoder_legacy_tmds_ext_add(struct drm_device *dev, int bios_index); |
| 351 | extern void atombios_external_tmds_setup(struct drm_encoder *encoder, int action); | 387 | extern void atombios_external_tmds_setup(struct drm_encoder *encoder, int action); |
| 388 | extern void atombios_digital_setup(struct drm_encoder *encoder, int action); | ||
| 352 | extern int atombios_get_encoder_mode(struct drm_encoder *encoder); | 389 | extern int atombios_get_encoder_mode(struct drm_encoder *encoder); |
| 353 | extern void radeon_encoder_set_active_device(struct drm_encoder *encoder); | 390 | extern void radeon_encoder_set_active_device(struct drm_encoder *encoder); |
| 354 | 391 | ||
| @@ -378,12 +415,16 @@ extern bool radeon_atom_get_clock_info(struct drm_device *dev); | |||
| 378 | extern bool radeon_combios_get_clock_info(struct drm_device *dev); | 415 | extern bool radeon_combios_get_clock_info(struct drm_device *dev); |
| 379 | extern struct radeon_encoder_atom_dig * | 416 | extern struct radeon_encoder_atom_dig * |
| 380 | radeon_atombios_get_lvds_info(struct radeon_encoder *encoder); | 417 | radeon_atombios_get_lvds_info(struct radeon_encoder *encoder); |
| 381 | bool radeon_atombios_get_tmds_info(struct radeon_encoder *encoder, | 418 | extern bool radeon_atombios_get_tmds_info(struct radeon_encoder *encoder, |
| 382 | struct radeon_encoder_int_tmds *tmds); | 419 | struct radeon_encoder_int_tmds *tmds); |
| 383 | bool radeon_legacy_get_tmds_info_from_combios(struct radeon_encoder *encoder, | 420 | extern bool radeon_legacy_get_tmds_info_from_combios(struct radeon_encoder *encoder, |
| 384 | struct radeon_encoder_int_tmds *tmds); | 421 | struct radeon_encoder_int_tmds *tmds); |
| 385 | bool radeon_legacy_get_tmds_info_from_table(struct radeon_encoder *encoder, | 422 | extern bool radeon_legacy_get_tmds_info_from_table(struct radeon_encoder *encoder, |
| 386 | struct radeon_encoder_int_tmds *tmds); | 423 | struct radeon_encoder_int_tmds *tmds); |
| 424 | extern bool radeon_legacy_get_ext_tmds_info_from_combios(struct radeon_encoder *encoder, | ||
| 425 | struct radeon_encoder_ext_tmds *tmds); | ||
| 426 | extern bool radeon_legacy_get_ext_tmds_info_from_table(struct radeon_encoder *encoder, | ||
| 427 | struct radeon_encoder_ext_tmds *tmds); | ||
| 387 | extern struct radeon_encoder_primary_dac * | 428 | extern struct radeon_encoder_primary_dac * |
| 388 | radeon_atombios_get_primary_dac_info(struct radeon_encoder *encoder); | 429 | radeon_atombios_get_primary_dac_info(struct radeon_encoder *encoder); |
| 389 | extern struct radeon_encoder_tv_dac * | 430 | extern struct radeon_encoder_tv_dac * |
| @@ -395,6 +436,8 @@ extern struct radeon_encoder_tv_dac * | |||
| 395 | radeon_combios_get_tv_dac_info(struct radeon_encoder *encoder); | 436 | radeon_combios_get_tv_dac_info(struct radeon_encoder *encoder); |
| 396 | extern struct radeon_encoder_primary_dac * | 437 | extern struct radeon_encoder_primary_dac * |
| 397 | radeon_combios_get_primary_dac_info(struct radeon_encoder *encoder); | 438 | radeon_combios_get_primary_dac_info(struct radeon_encoder *encoder); |
| 439 | extern bool radeon_combios_external_tmds_setup(struct drm_encoder *encoder); | ||
| 440 | extern void radeon_external_tmds_setup(struct drm_encoder *encoder); | ||
| 398 | extern void radeon_combios_output_lock(struct drm_encoder *encoder, bool lock); | 441 | extern void radeon_combios_output_lock(struct drm_encoder *encoder, bool lock); |
| 399 | extern void radeon_combios_initialize_bios_scratch_regs(struct drm_device *dev); | 442 | extern void radeon_combios_initialize_bios_scratch_regs(struct drm_device *dev); |
| 400 | extern void radeon_atom_output_lock(struct drm_encoder *encoder, bool lock); | 443 | extern void radeon_atom_output_lock(struct drm_encoder *encoder, bool lock); |
| @@ -426,16 +469,13 @@ void radeon_atombios_init_crtc(struct drm_device *dev, | |||
| 426 | struct radeon_crtc *radeon_crtc); | 469 | struct radeon_crtc *radeon_crtc); |
| 427 | void radeon_legacy_init_crtc(struct drm_device *dev, | 470 | void radeon_legacy_init_crtc(struct drm_device *dev, |
| 428 | struct radeon_crtc *radeon_crtc); | 471 | struct radeon_crtc *radeon_crtc); |
| 429 | void radeon_i2c_do_lock(struct radeon_connector *radeon_connector, int lock_state); | 472 | extern void radeon_i2c_do_lock(struct radeon_i2c_chan *i2c, int lock_state); |
| 430 | 473 | ||
| 431 | void radeon_get_clock_info(struct drm_device *dev); | 474 | void radeon_get_clock_info(struct drm_device *dev); |
| 432 | 475 | ||
| 433 | extern bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev); | 476 | extern bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev); |
| 434 | extern bool radeon_get_atom_connector_info_from_supported_devices_table(struct drm_device *dev); | 477 | extern bool radeon_get_atom_connector_info_from_supported_devices_table(struct drm_device *dev); |
| 435 | 478 | ||
| 436 | void radeon_rmx_mode_fixup(struct drm_encoder *encoder, | ||
| 437 | struct drm_display_mode *mode, | ||
| 438 | struct drm_display_mode *adjusted_mode); | ||
| 439 | void radeon_enc_destroy(struct drm_encoder *encoder); | 479 | void radeon_enc_destroy(struct drm_encoder *encoder); |
| 440 | void radeon_copy_fb(struct drm_device *dev, struct drm_gem_object *dst_obj); | 480 | void radeon_copy_fb(struct drm_device *dev, struct drm_gem_object *dst_obj); |
| 441 | void radeon_combios_asic_init(struct drm_device *dev); | 481 | void radeon_combios_asic_init(struct drm_device *dev); |
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c index 1f056dadc5c2..bec494384825 100644 --- a/drivers/gpu/drm/radeon/radeon_object.c +++ b/drivers/gpu/drm/radeon/radeon_object.c | |||
| @@ -34,74 +34,32 @@ | |||
| 34 | #include "radeon_drm.h" | 34 | #include "radeon_drm.h" |
| 35 | #include "radeon.h" | 35 | #include "radeon.h" |
| 36 | 36 | ||
| 37 | struct radeon_object { | ||
| 38 | struct ttm_buffer_object tobj; | ||
| 39 | struct list_head list; | ||
| 40 | struct radeon_device *rdev; | ||
| 41 | struct drm_gem_object *gobj; | ||
| 42 | struct ttm_bo_kmap_obj kmap; | ||
| 43 | unsigned pin_count; | ||
| 44 | uint64_t gpu_addr; | ||
| 45 | void *kptr; | ||
| 46 | bool is_iomem; | ||
| 47 | uint32_t tiling_flags; | ||
| 48 | uint32_t pitch; | ||
| 49 | int surface_reg; | ||
| 50 | }; | ||
| 51 | 37 | ||
| 52 | int radeon_ttm_init(struct radeon_device *rdev); | 38 | int radeon_ttm_init(struct radeon_device *rdev); |
| 53 | void radeon_ttm_fini(struct radeon_device *rdev); | 39 | void radeon_ttm_fini(struct radeon_device *rdev); |
| 40 | static void radeon_bo_clear_surface_reg(struct radeon_bo *bo); | ||
| 54 | 41 | ||
| 55 | /* | 42 | /* |
| 56 | * To exclude mutual BO access we rely on bo_reserve exclusion, as all | 43 | * To exclude mutual BO access we rely on bo_reserve exclusion, as all |
| 57 | * function are calling it. | 44 | * function are calling it. |
| 58 | */ | 45 | */ |
| 59 | 46 | ||
| 60 | static int radeon_object_reserve(struct radeon_object *robj, bool interruptible) | 47 | static void radeon_ttm_bo_destroy(struct ttm_buffer_object *tbo) |
| 61 | { | 48 | { |
| 62 | return ttm_bo_reserve(&robj->tobj, interruptible, false, false, 0); | 49 | struct radeon_bo *bo; |
| 63 | } | ||
| 64 | 50 | ||
| 65 | static void radeon_object_unreserve(struct radeon_object *robj) | 51 | bo = container_of(tbo, struct radeon_bo, tbo); |
| 66 | { | 52 | mutex_lock(&bo->rdev->gem.mutex); |
| 67 | ttm_bo_unreserve(&robj->tobj); | 53 | list_del_init(&bo->list); |
| 54 | mutex_unlock(&bo->rdev->gem.mutex); | ||
| 55 | radeon_bo_clear_surface_reg(bo); | ||
| 56 | kfree(bo); | ||
| 68 | } | 57 | } |
| 69 | 58 | ||
| 70 | static void radeon_ttm_object_object_destroy(struct ttm_buffer_object *tobj) | 59 | static inline u32 radeon_ttm_flags_from_domain(u32 domain) |
| 71 | { | 60 | { |
| 72 | struct radeon_object *robj; | 61 | u32 flags = 0; |
| 73 | |||
| 74 | robj = container_of(tobj, struct radeon_object, tobj); | ||
| 75 | list_del_init(&robj->list); | ||
| 76 | radeon_object_clear_surface_reg(robj); | ||
| 77 | kfree(robj); | ||
| 78 | } | ||
| 79 | |||
| 80 | static inline void radeon_object_gpu_addr(struct radeon_object *robj) | ||
| 81 | { | ||
| 82 | /* Default gpu address */ | ||
| 83 | robj->gpu_addr = 0xFFFFFFFFFFFFFFFFULL; | ||
| 84 | if (robj->tobj.mem.mm_node == NULL) { | ||
| 85 | return; | ||
| 86 | } | ||
| 87 | robj->gpu_addr = ((u64)robj->tobj.mem.mm_node->start) << PAGE_SHIFT; | ||
| 88 | switch (robj->tobj.mem.mem_type) { | ||
| 89 | case TTM_PL_VRAM: | ||
| 90 | robj->gpu_addr += (u64)robj->rdev->mc.vram_location; | ||
| 91 | break; | ||
| 92 | case TTM_PL_TT: | ||
| 93 | robj->gpu_addr += (u64)robj->rdev->mc.gtt_location; | ||
| 94 | break; | ||
| 95 | default: | ||
| 96 | DRM_ERROR("Unknown placement %d\n", robj->tobj.mem.mem_type); | ||
| 97 | robj->gpu_addr = 0xFFFFFFFFFFFFFFFFULL; | ||
| 98 | return; | ||
| 99 | } | ||
| 100 | } | ||
| 101 | 62 | ||
| 102 | static inline uint32_t radeon_object_flags_from_domain(uint32_t domain) | ||
| 103 | { | ||
| 104 | uint32_t flags = 0; | ||
| 105 | if (domain & RADEON_GEM_DOMAIN_VRAM) { | 63 | if (domain & RADEON_GEM_DOMAIN_VRAM) { |
| 106 | flags |= TTM_PL_FLAG_VRAM | TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED; | 64 | flags |= TTM_PL_FLAG_VRAM | TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED; |
| 107 | } | 65 | } |
| @@ -117,17 +75,13 @@ static inline uint32_t radeon_object_flags_from_domain(uint32_t domain) | |||
| 117 | return flags; | 75 | return flags; |
| 118 | } | 76 | } |
| 119 | 77 | ||
| 120 | int radeon_object_create(struct radeon_device *rdev, | 78 | int radeon_bo_create(struct radeon_device *rdev, struct drm_gem_object *gobj, |
| 121 | struct drm_gem_object *gobj, | 79 | unsigned long size, bool kernel, u32 domain, |
| 122 | unsigned long size, | 80 | struct radeon_bo **bo_ptr) |
| 123 | bool kernel, | ||
| 124 | uint32_t domain, | ||
| 125 | bool interruptible, | ||
| 126 | struct radeon_object **robj_ptr) | ||
| 127 | { | 81 | { |
| 128 | struct radeon_object *robj; | 82 | struct radeon_bo *bo; |
| 129 | enum ttm_bo_type type; | 83 | enum ttm_bo_type type; |
| 130 | uint32_t flags; | 84 | u32 flags; |
| 131 | int r; | 85 | int r; |
| 132 | 86 | ||
| 133 | if (unlikely(rdev->mman.bdev.dev_mapping == NULL)) { | 87 | if (unlikely(rdev->mman.bdev.dev_mapping == NULL)) { |
| @@ -138,206 +92,140 @@ int radeon_object_create(struct radeon_device *rdev, | |||
| 138 | } else { | 92 | } else { |
| 139 | type = ttm_bo_type_device; | 93 | type = ttm_bo_type_device; |
| 140 | } | 94 | } |
| 141 | *robj_ptr = NULL; | 95 | *bo_ptr = NULL; |
| 142 | robj = kzalloc(sizeof(struct radeon_object), GFP_KERNEL); | 96 | bo = kzalloc(sizeof(struct radeon_bo), GFP_KERNEL); |
| 143 | if (robj == NULL) { | 97 | if (bo == NULL) |
| 144 | return -ENOMEM; | 98 | return -ENOMEM; |
| 145 | } | 99 | bo->rdev = rdev; |
| 146 | robj->rdev = rdev; | 100 | bo->gobj = gobj; |
| 147 | robj->gobj = gobj; | 101 | bo->surface_reg = -1; |
| 148 | robj->surface_reg = -1; | 102 | INIT_LIST_HEAD(&bo->list); |
| 149 | INIT_LIST_HEAD(&robj->list); | 103 | |
| 150 | 104 | flags = radeon_ttm_flags_from_domain(domain); | |
| 151 | flags = radeon_object_flags_from_domain(domain); | 105 | retry: |
| 152 | r = ttm_buffer_object_init(&rdev->mman.bdev, &robj->tobj, size, type, flags, | 106 | r = ttm_buffer_object_init(&rdev->mman.bdev, &bo->tbo, size, type, |
| 153 | 0, 0, false, NULL, size, | 107 | flags, 0, 0, true, NULL, size, |
| 154 | &radeon_ttm_object_object_destroy); | 108 | &radeon_ttm_bo_destroy); |
| 155 | if (unlikely(r != 0)) { | 109 | if (unlikely(r != 0)) { |
| 110 | if (r == -ERESTART) | ||
| 111 | goto retry; | ||
| 156 | /* ttm call radeon_ttm_object_object_destroy if error happen */ | 112 | /* ttm call radeon_ttm_object_object_destroy if error happen */ |
| 157 | DRM_ERROR("Failed to allocate TTM object (%ld, 0x%08X, %u)\n", | 113 | dev_err(rdev->dev, "object_init failed for (%ld, 0x%08X)\n", |
| 158 | size, flags, 0); | 114 | size, flags); |
| 159 | return r; | 115 | return r; |
| 160 | } | 116 | } |
| 161 | *robj_ptr = robj; | 117 | *bo_ptr = bo; |
| 162 | if (gobj) { | 118 | if (gobj) { |
| 163 | list_add_tail(&robj->list, &rdev->gem.objects); | 119 | mutex_lock(&bo->rdev->gem.mutex); |
| 120 | list_add_tail(&bo->list, &rdev->gem.objects); | ||
| 121 | mutex_unlock(&bo->rdev->gem.mutex); | ||
| 164 | } | 122 | } |
| 165 | return 0; | 123 | return 0; |
| 166 | } | 124 | } |
| 167 | 125 | ||
| 168 | int radeon_object_kmap(struct radeon_object *robj, void **ptr) | 126 | int radeon_bo_kmap(struct radeon_bo *bo, void **ptr) |
| 169 | { | 127 | { |
| 128 | bool is_iomem; | ||
| 170 | int r; | 129 | int r; |
| 171 | 130 | ||
| 172 | spin_lock(&robj->tobj.lock); | 131 | if (bo->kptr) { |
| 173 | if (robj->kptr) { | ||
| 174 | if (ptr) { | 132 | if (ptr) { |
| 175 | *ptr = robj->kptr; | 133 | *ptr = bo->kptr; |
| 176 | } | 134 | } |
| 177 | spin_unlock(&robj->tobj.lock); | ||
| 178 | return 0; | 135 | return 0; |
| 179 | } | 136 | } |
| 180 | spin_unlock(&robj->tobj.lock); | 137 | r = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.num_pages, &bo->kmap); |
| 181 | r = ttm_bo_kmap(&robj->tobj, 0, robj->tobj.num_pages, &robj->kmap); | ||
| 182 | if (r) { | 138 | if (r) { |
| 183 | return r; | 139 | return r; |
| 184 | } | 140 | } |
| 185 | spin_lock(&robj->tobj.lock); | 141 | bo->kptr = ttm_kmap_obj_virtual(&bo->kmap, &is_iomem); |
| 186 | robj->kptr = ttm_kmap_obj_virtual(&robj->kmap, &robj->is_iomem); | ||
| 187 | spin_unlock(&robj->tobj.lock); | ||
| 188 | if (ptr) { | 142 | if (ptr) { |
| 189 | *ptr = robj->kptr; | 143 | *ptr = bo->kptr; |
| 190 | } | 144 | } |
| 191 | radeon_object_check_tiling(robj, 0, 0); | 145 | radeon_bo_check_tiling(bo, 0, 0); |
| 192 | return 0; | 146 | return 0; |
| 193 | } | 147 | } |
| 194 | 148 | ||
| 195 | void radeon_object_kunmap(struct radeon_object *robj) | 149 | void radeon_bo_kunmap(struct radeon_bo *bo) |
| 196 | { | 150 | { |
| 197 | spin_lock(&robj->tobj.lock); | 151 | if (bo->kptr == NULL) |
| 198 | if (robj->kptr == NULL) { | ||
| 199 | spin_unlock(&robj->tobj.lock); | ||
| 200 | return; | 152 | return; |
| 201 | } | 153 | bo->kptr = NULL; |
| 202 | robj->kptr = NULL; | 154 | radeon_bo_check_tiling(bo, 0, 0); |
| 203 | spin_unlock(&robj->tobj.lock); | 155 | ttm_bo_kunmap(&bo->kmap); |
| 204 | radeon_object_check_tiling(robj, 0, 0); | ||
| 205 | ttm_bo_kunmap(&robj->kmap); | ||
| 206 | } | 156 | } |
| 207 | 157 | ||
| 208 | void radeon_object_unref(struct radeon_object **robj) | 158 | void radeon_bo_unref(struct radeon_bo **bo) |
| 209 | { | 159 | { |
| 210 | struct ttm_buffer_object *tobj; | 160 | struct ttm_buffer_object *tbo; |
| 211 | 161 | ||
| 212 | if ((*robj) == NULL) { | 162 | if ((*bo) == NULL) |
| 213 | return; | 163 | return; |
| 214 | } | 164 | tbo = &((*bo)->tbo); |
| 215 | tobj = &((*robj)->tobj); | 165 | ttm_bo_unref(&tbo); |
| 216 | ttm_bo_unref(&tobj); | 166 | if (tbo == NULL) |
| 217 | if (tobj == NULL) { | 167 | *bo = NULL; |
| 218 | *robj = NULL; | ||
| 219 | } | ||
| 220 | } | ||
| 221 | |||
| 222 | int radeon_object_mmap(struct radeon_object *robj, uint64_t *offset) | ||
| 223 | { | ||
| 224 | *offset = robj->tobj.addr_space_offset; | ||
| 225 | return 0; | ||
| 226 | } | 168 | } |
| 227 | 169 | ||
| 228 | int radeon_object_pin(struct radeon_object *robj, uint32_t domain, | 170 | int radeon_bo_pin(struct radeon_bo *bo, u32 domain, u64 *gpu_addr) |
| 229 | uint64_t *gpu_addr) | ||
| 230 | { | 171 | { |
| 231 | uint32_t flags; | 172 | u32 flags; |
| 232 | uint32_t tmp; | 173 | u32 tmp; |
| 233 | int r; | 174 | int r; |
| 234 | 175 | ||
| 235 | flags = radeon_object_flags_from_domain(domain); | 176 | flags = radeon_ttm_flags_from_domain(domain); |
| 236 | spin_lock(&robj->tobj.lock); | 177 | if (bo->pin_count) { |
| 237 | if (robj->pin_count) { | 178 | bo->pin_count++; |
| 238 | robj->pin_count++; | 179 | if (gpu_addr) |
| 239 | if (gpu_addr != NULL) { | 180 | *gpu_addr = radeon_bo_gpu_offset(bo); |
| 240 | *gpu_addr = robj->gpu_addr; | ||
| 241 | } | ||
| 242 | spin_unlock(&robj->tobj.lock); | ||
| 243 | return 0; | 181 | return 0; |
| 244 | } | 182 | } |
| 245 | spin_unlock(&robj->tobj.lock); | 183 | tmp = bo->tbo.mem.placement; |
| 246 | r = radeon_object_reserve(robj, false); | ||
| 247 | if (unlikely(r != 0)) { | ||
| 248 | DRM_ERROR("radeon: failed to reserve object for pinning it.\n"); | ||
| 249 | return r; | ||
| 250 | } | ||
| 251 | tmp = robj->tobj.mem.placement; | ||
| 252 | ttm_flag_masked(&tmp, flags, TTM_PL_MASK_MEM); | 184 | ttm_flag_masked(&tmp, flags, TTM_PL_MASK_MEM); |
| 253 | robj->tobj.proposed_placement = tmp | TTM_PL_FLAG_NO_EVICT | TTM_PL_MASK_CACHING; | 185 | bo->tbo.proposed_placement = tmp | TTM_PL_FLAG_NO_EVICT | |
| 254 | r = ttm_buffer_object_validate(&robj->tobj, | 186 | TTM_PL_MASK_CACHING; |
| 255 | robj->tobj.proposed_placement, | 187 | retry: |
| 256 | false, false); | 188 | r = ttm_buffer_object_validate(&bo->tbo, bo->tbo.proposed_placement, |
| 257 | radeon_object_gpu_addr(robj); | 189 | true, false); |
| 258 | if (gpu_addr != NULL) { | 190 | if (likely(r == 0)) { |
| 259 | *gpu_addr = robj->gpu_addr; | 191 | bo->pin_count = 1; |
| 192 | if (gpu_addr != NULL) | ||
| 193 | *gpu_addr = radeon_bo_gpu_offset(bo); | ||
| 260 | } | 194 | } |
| 261 | robj->pin_count = 1; | ||
| 262 | if (unlikely(r != 0)) { | 195 | if (unlikely(r != 0)) { |
| 263 | DRM_ERROR("radeon: failed to pin object.\n"); | 196 | if (r == -ERESTART) |
| 197 | goto retry; | ||
| 198 | dev_err(bo->rdev->dev, "%p pin failed\n", bo); | ||
| 264 | } | 199 | } |
| 265 | radeon_object_unreserve(robj); | ||
| 266 | return r; | 200 | return r; |
| 267 | } | 201 | } |
| 268 | 202 | ||
| 269 | void radeon_object_unpin(struct radeon_object *robj) | 203 | int radeon_bo_unpin(struct radeon_bo *bo) |
| 270 | { | 204 | { |
| 271 | uint32_t flags; | ||
| 272 | int r; | 205 | int r; |
| 273 | 206 | ||
| 274 | spin_lock(&robj->tobj.lock); | 207 | if (!bo->pin_count) { |
| 275 | if (!robj->pin_count) { | 208 | dev_warn(bo->rdev->dev, "%p unpin not necessary\n", bo); |
| 276 | spin_unlock(&robj->tobj.lock); | 209 | return 0; |
| 277 | printk(KERN_WARNING "Unpin not necessary for %p !\n", robj); | ||
| 278 | return; | ||
| 279 | } | ||
| 280 | robj->pin_count--; | ||
| 281 | if (robj->pin_count) { | ||
| 282 | spin_unlock(&robj->tobj.lock); | ||
| 283 | return; | ||
| 284 | } | ||
| 285 | spin_unlock(&robj->tobj.lock); | ||
| 286 | r = radeon_object_reserve(robj, false); | ||
| 287 | if (unlikely(r != 0)) { | ||
| 288 | DRM_ERROR("radeon: failed to reserve object for unpinning it.\n"); | ||
| 289 | return; | ||
| 290 | } | ||
| 291 | flags = robj->tobj.mem.placement; | ||
| 292 | robj->tobj.proposed_placement = flags & ~TTM_PL_FLAG_NO_EVICT; | ||
| 293 | r = ttm_buffer_object_validate(&robj->tobj, | ||
| 294 | robj->tobj.proposed_placement, | ||
| 295 | false, false); | ||
| 296 | if (unlikely(r != 0)) { | ||
| 297 | DRM_ERROR("radeon: failed to unpin buffer.\n"); | ||
| 298 | } | ||
| 299 | radeon_object_unreserve(robj); | ||
| 300 | } | ||
| 301 | |||
| 302 | int radeon_object_wait(struct radeon_object *robj) | ||
| 303 | { | ||
| 304 | int r = 0; | ||
| 305 | |||
| 306 | /* FIXME: should use block reservation instead */ | ||
| 307 | r = radeon_object_reserve(robj, true); | ||
| 308 | if (unlikely(r != 0)) { | ||
| 309 | DRM_ERROR("radeon: failed to reserve object for waiting.\n"); | ||
| 310 | return r; | ||
| 311 | } | ||
| 312 | spin_lock(&robj->tobj.lock); | ||
| 313 | if (robj->tobj.sync_obj) { | ||
| 314 | r = ttm_bo_wait(&robj->tobj, true, true, false); | ||
| 315 | } | 210 | } |
| 316 | spin_unlock(&robj->tobj.lock); | 211 | bo->pin_count--; |
| 317 | radeon_object_unreserve(robj); | 212 | if (bo->pin_count) |
| 318 | return r; | 213 | return 0; |
| 319 | } | 214 | bo->tbo.proposed_placement = bo->tbo.mem.placement & |
| 320 | 215 | ~TTM_PL_FLAG_NO_EVICT; | |
| 321 | int radeon_object_busy_domain(struct radeon_object *robj, uint32_t *cur_placement) | 216 | retry: |
| 322 | { | 217 | r = ttm_buffer_object_validate(&bo->tbo, bo->tbo.proposed_placement, |
| 323 | int r = 0; | 218 | true, false); |
| 324 | |||
| 325 | r = radeon_object_reserve(robj, true); | ||
| 326 | if (unlikely(r != 0)) { | 219 | if (unlikely(r != 0)) { |
| 327 | DRM_ERROR("radeon: failed to reserve object for waiting.\n"); | 220 | if (r == -ERESTART) |
| 221 | goto retry; | ||
| 222 | dev_err(bo->rdev->dev, "%p validate failed for unpin\n", bo); | ||
| 328 | return r; | 223 | return r; |
| 329 | } | 224 | } |
| 330 | spin_lock(&robj->tobj.lock); | 225 | return 0; |
| 331 | *cur_placement = robj->tobj.mem.mem_type; | ||
| 332 | if (robj->tobj.sync_obj) { | ||
| 333 | r = ttm_bo_wait(&robj->tobj, true, true, true); | ||
| 334 | } | ||
| 335 | spin_unlock(&robj->tobj.lock); | ||
| 336 | radeon_object_unreserve(robj); | ||
| 337 | return r; | ||
| 338 | } | 226 | } |
| 339 | 227 | ||
| 340 | int radeon_object_evict_vram(struct radeon_device *rdev) | 228 | int radeon_bo_evict_vram(struct radeon_device *rdev) |
| 341 | { | 229 | { |
| 342 | if (rdev->flags & RADEON_IS_IGP) { | 230 | if (rdev->flags & RADEON_IS_IGP) { |
| 343 | /* Useless to evict on IGP chips */ | 231 | /* Useless to evict on IGP chips */ |
| @@ -346,30 +234,32 @@ int radeon_object_evict_vram(struct radeon_device *rdev) | |||
| 346 | return ttm_bo_evict_mm(&rdev->mman.bdev, TTM_PL_VRAM); | 234 | return ttm_bo_evict_mm(&rdev->mman.bdev, TTM_PL_VRAM); |
| 347 | } | 235 | } |
| 348 | 236 | ||
| 349 | void radeon_object_force_delete(struct radeon_device *rdev) | 237 | void radeon_bo_force_delete(struct radeon_device *rdev) |
| 350 | { | 238 | { |
| 351 | struct radeon_object *robj, *n; | 239 | struct radeon_bo *bo, *n; |
| 352 | struct drm_gem_object *gobj; | 240 | struct drm_gem_object *gobj; |
| 353 | 241 | ||
| 354 | if (list_empty(&rdev->gem.objects)) { | 242 | if (list_empty(&rdev->gem.objects)) { |
| 355 | return; | 243 | return; |
| 356 | } | 244 | } |
| 357 | DRM_ERROR("Userspace still has active objects !\n"); | 245 | dev_err(rdev->dev, "Userspace still has active objects !\n"); |
| 358 | list_for_each_entry_safe(robj, n, &rdev->gem.objects, list) { | 246 | list_for_each_entry_safe(bo, n, &rdev->gem.objects, list) { |
| 359 | mutex_lock(&rdev->ddev->struct_mutex); | 247 | mutex_lock(&rdev->ddev->struct_mutex); |
| 360 | gobj = robj->gobj; | 248 | gobj = bo->gobj; |
| 361 | DRM_ERROR("Force free for (%p,%p,%lu,%lu)\n", | 249 | dev_err(rdev->dev, "%p %p %lu %lu force free\n", |
| 362 | gobj, robj, (unsigned long)gobj->size, | 250 | gobj, bo, (unsigned long)gobj->size, |
| 363 | *((unsigned long *)&gobj->refcount)); | 251 | *((unsigned long *)&gobj->refcount)); |
| 364 | list_del_init(&robj->list); | 252 | mutex_lock(&bo->rdev->gem.mutex); |
| 365 | radeon_object_unref(&robj); | 253 | list_del_init(&bo->list); |
| 254 | mutex_unlock(&bo->rdev->gem.mutex); | ||
| 255 | radeon_bo_unref(&bo); | ||
| 366 | gobj->driver_private = NULL; | 256 | gobj->driver_private = NULL; |
| 367 | drm_gem_object_unreference(gobj); | 257 | drm_gem_object_unreference(gobj); |
| 368 | mutex_unlock(&rdev->ddev->struct_mutex); | 258 | mutex_unlock(&rdev->ddev->struct_mutex); |
| 369 | } | 259 | } |
| 370 | } | 260 | } |
| 371 | 261 | ||
| 372 | int radeon_object_init(struct radeon_device *rdev) | 262 | int radeon_bo_init(struct radeon_device *rdev) |
| 373 | { | 263 | { |
| 374 | /* Add an MTRR for the VRAM */ | 264 | /* Add an MTRR for the VRAM */ |
| 375 | rdev->mc.vram_mtrr = mtrr_add(rdev->mc.aper_base, rdev->mc.aper_size, | 265 | rdev->mc.vram_mtrr = mtrr_add(rdev->mc.aper_base, rdev->mc.aper_size, |
| @@ -382,13 +272,13 @@ int radeon_object_init(struct radeon_device *rdev) | |||
| 382 | return radeon_ttm_init(rdev); | 272 | return radeon_ttm_init(rdev); |
| 383 | } | 273 | } |
| 384 | 274 | ||
| 385 | void radeon_object_fini(struct radeon_device *rdev) | 275 | void radeon_bo_fini(struct radeon_device *rdev) |
| 386 | { | 276 | { |
| 387 | radeon_ttm_fini(rdev); | 277 | radeon_ttm_fini(rdev); |
| 388 | } | 278 | } |
| 389 | 279 | ||
| 390 | void radeon_object_list_add_object(struct radeon_object_list *lobj, | 280 | void radeon_bo_list_add_object(struct radeon_bo_list *lobj, |
| 391 | struct list_head *head) | 281 | struct list_head *head) |
| 392 | { | 282 | { |
| 393 | if (lobj->wdomain) { | 283 | if (lobj->wdomain) { |
| 394 | list_add(&lobj->list, head); | 284 | list_add(&lobj->list, head); |
| @@ -397,72 +287,67 @@ void radeon_object_list_add_object(struct radeon_object_list *lobj, | |||
| 397 | } | 287 | } |
| 398 | } | 288 | } |
| 399 | 289 | ||
| 400 | int radeon_object_list_reserve(struct list_head *head) | 290 | int radeon_bo_list_reserve(struct list_head *head) |
| 401 | { | 291 | { |
| 402 | struct radeon_object_list *lobj; | 292 | struct radeon_bo_list *lobj; |
| 403 | int r; | 293 | int r; |
| 404 | 294 | ||
| 405 | list_for_each_entry(lobj, head, list){ | 295 | list_for_each_entry(lobj, head, list){ |
| 406 | if (!lobj->robj->pin_count) { | 296 | r = radeon_bo_reserve(lobj->bo, false); |
| 407 | r = radeon_object_reserve(lobj->robj, true); | 297 | if (unlikely(r != 0)) |
| 408 | if (unlikely(r != 0)) { | 298 | return r; |
| 409 | DRM_ERROR("radeon: failed to reserve object.\n"); | ||
| 410 | return r; | ||
| 411 | } | ||
| 412 | } else { | ||
| 413 | } | ||
| 414 | } | 299 | } |
| 415 | return 0; | 300 | return 0; |
| 416 | } | 301 | } |
| 417 | 302 | ||
| 418 | void radeon_object_list_unreserve(struct list_head *head) | 303 | void radeon_bo_list_unreserve(struct list_head *head) |
| 419 | { | 304 | { |
| 420 | struct radeon_object_list *lobj; | 305 | struct radeon_bo_list *lobj; |
| 421 | 306 | ||
| 422 | list_for_each_entry(lobj, head, list) { | 307 | list_for_each_entry(lobj, head, list) { |
| 423 | if (!lobj->robj->pin_count) { | 308 | /* only unreserve object we successfully reserved */ |
| 424 | radeon_object_unreserve(lobj->robj); | 309 | if (radeon_bo_is_reserved(lobj->bo)) |
| 425 | } | 310 | radeon_bo_unreserve(lobj->bo); |
| 426 | } | 311 | } |
| 427 | } | 312 | } |
| 428 | 313 | ||
| 429 | int radeon_object_list_validate(struct list_head *head, void *fence) | 314 | int radeon_bo_list_validate(struct list_head *head, void *fence) |
| 430 | { | 315 | { |
| 431 | struct radeon_object_list *lobj; | 316 | struct radeon_bo_list *lobj; |
| 432 | struct radeon_object *robj; | 317 | struct radeon_bo *bo; |
| 433 | struct radeon_fence *old_fence = NULL; | 318 | struct radeon_fence *old_fence = NULL; |
| 434 | int r; | 319 | int r; |
| 435 | 320 | ||
| 436 | r = radeon_object_list_reserve(head); | 321 | r = radeon_bo_list_reserve(head); |
| 437 | if (unlikely(r != 0)) { | 322 | if (unlikely(r != 0)) { |
| 438 | radeon_object_list_unreserve(head); | ||
| 439 | return r; | 323 | return r; |
| 440 | } | 324 | } |
| 441 | list_for_each_entry(lobj, head, list) { | 325 | list_for_each_entry(lobj, head, list) { |
| 442 | robj = lobj->robj; | 326 | bo = lobj->bo; |
| 443 | if (!robj->pin_count) { | 327 | if (!bo->pin_count) { |
| 444 | if (lobj->wdomain) { | 328 | if (lobj->wdomain) { |
| 445 | robj->tobj.proposed_placement = | 329 | bo->tbo.proposed_placement = |
| 446 | radeon_object_flags_from_domain(lobj->wdomain); | 330 | radeon_ttm_flags_from_domain(lobj->wdomain); |
| 447 | } else { | 331 | } else { |
| 448 | robj->tobj.proposed_placement = | 332 | bo->tbo.proposed_placement = |
| 449 | radeon_object_flags_from_domain(lobj->rdomain); | 333 | radeon_ttm_flags_from_domain(lobj->rdomain); |
| 450 | } | 334 | } |
| 451 | r = ttm_buffer_object_validate(&robj->tobj, | 335 | retry: |
| 452 | robj->tobj.proposed_placement, | 336 | r = ttm_buffer_object_validate(&bo->tbo, |
| 453 | true, false); | 337 | bo->tbo.proposed_placement, |
| 338 | true, false); | ||
| 454 | if (unlikely(r)) { | 339 | if (unlikely(r)) { |
| 455 | DRM_ERROR("radeon: failed to validate.\n"); | 340 | if (r == -ERESTART) |
| 341 | goto retry; | ||
| 456 | return r; | 342 | return r; |
| 457 | } | 343 | } |
| 458 | radeon_object_gpu_addr(robj); | ||
| 459 | } | 344 | } |
| 460 | lobj->gpu_offset = robj->gpu_addr; | 345 | lobj->gpu_offset = radeon_bo_gpu_offset(bo); |
| 461 | lobj->tiling_flags = robj->tiling_flags; | 346 | lobj->tiling_flags = bo->tiling_flags; |
| 462 | if (fence) { | 347 | if (fence) { |
| 463 | old_fence = (struct radeon_fence *)robj->tobj.sync_obj; | 348 | old_fence = (struct radeon_fence *)bo->tbo.sync_obj; |
| 464 | robj->tobj.sync_obj = radeon_fence_ref(fence); | 349 | bo->tbo.sync_obj = radeon_fence_ref(fence); |
| 465 | robj->tobj.sync_obj_arg = NULL; | 350 | bo->tbo.sync_obj_arg = NULL; |
| 466 | } | 351 | } |
| 467 | if (old_fence) { | 352 | if (old_fence) { |
| 468 | radeon_fence_unref(&old_fence); | 353 | radeon_fence_unref(&old_fence); |
| @@ -471,51 +356,44 @@ int radeon_object_list_validate(struct list_head *head, void *fence) | |||
| 471 | return 0; | 356 | return 0; |
| 472 | } | 357 | } |
| 473 | 358 | ||
| 474 | void radeon_object_list_unvalidate(struct list_head *head) | 359 | void radeon_bo_list_unvalidate(struct list_head *head, void *fence) |
| 475 | { | 360 | { |
| 476 | struct radeon_object_list *lobj; | 361 | struct radeon_bo_list *lobj; |
| 477 | struct radeon_fence *old_fence = NULL; | 362 | struct radeon_fence *old_fence; |
| 478 | 363 | ||
| 479 | list_for_each_entry(lobj, head, list) { | 364 | if (fence) |
| 480 | old_fence = (struct radeon_fence *)lobj->robj->tobj.sync_obj; | 365 | list_for_each_entry(lobj, head, list) { |
| 481 | lobj->robj->tobj.sync_obj = NULL; | 366 | old_fence = to_radeon_fence(lobj->bo->tbo.sync_obj); |
| 482 | if (old_fence) { | 367 | if (old_fence == fence) { |
| 483 | radeon_fence_unref(&old_fence); | 368 | lobj->bo->tbo.sync_obj = NULL; |
| 369 | radeon_fence_unref(&old_fence); | ||
| 370 | } | ||
| 484 | } | 371 | } |
| 485 | } | 372 | radeon_bo_list_unreserve(head); |
| 486 | radeon_object_list_unreserve(head); | ||
| 487 | } | ||
| 488 | |||
| 489 | void radeon_object_list_clean(struct list_head *head) | ||
| 490 | { | ||
| 491 | radeon_object_list_unreserve(head); | ||
| 492 | } | 373 | } |
| 493 | 374 | ||
| 494 | int radeon_object_fbdev_mmap(struct radeon_object *robj, | 375 | int radeon_bo_fbdev_mmap(struct radeon_bo *bo, |
| 495 | struct vm_area_struct *vma) | 376 | struct vm_area_struct *vma) |
| 496 | { | 377 | { |
| 497 | return ttm_fbdev_mmap(vma, &robj->tobj); | 378 | return ttm_fbdev_mmap(vma, &bo->tbo); |
| 498 | } | 379 | } |
| 499 | 380 | ||
| 500 | unsigned long radeon_object_size(struct radeon_object *robj) | 381 | static int radeon_bo_get_surface_reg(struct radeon_bo *bo) |
| 501 | { | 382 | { |
| 502 | return robj->tobj.num_pages << PAGE_SHIFT; | 383 | struct radeon_device *rdev = bo->rdev; |
| 503 | } | ||
| 504 | |||
| 505 | int radeon_object_get_surface_reg(struct radeon_object *robj) | ||
| 506 | { | ||
| 507 | struct radeon_device *rdev = robj->rdev; | ||
| 508 | struct radeon_surface_reg *reg; | 384 | struct radeon_surface_reg *reg; |
| 509 | struct radeon_object *old_object; | 385 | struct radeon_bo *old_object; |
| 510 | int steal; | 386 | int steal; |
| 511 | int i; | 387 | int i; |
| 512 | 388 | ||
| 513 | if (!robj->tiling_flags) | 389 | BUG_ON(!atomic_read(&bo->tbo.reserved)); |
| 390 | |||
| 391 | if (!bo->tiling_flags) | ||
| 514 | return 0; | 392 | return 0; |
| 515 | 393 | ||
| 516 | if (robj->surface_reg >= 0) { | 394 | if (bo->surface_reg >= 0) { |
| 517 | reg = &rdev->surface_regs[robj->surface_reg]; | 395 | reg = &rdev->surface_regs[bo->surface_reg]; |
| 518 | i = robj->surface_reg; | 396 | i = bo->surface_reg; |
| 519 | goto out; | 397 | goto out; |
| 520 | } | 398 | } |
| 521 | 399 | ||
| @@ -523,10 +401,10 @@ int radeon_object_get_surface_reg(struct radeon_object *robj) | |||
| 523 | for (i = 0; i < RADEON_GEM_MAX_SURFACES; i++) { | 401 | for (i = 0; i < RADEON_GEM_MAX_SURFACES; i++) { |
| 524 | 402 | ||
| 525 | reg = &rdev->surface_regs[i]; | 403 | reg = &rdev->surface_regs[i]; |
| 526 | if (!reg->robj) | 404 | if (!reg->bo) |
| 527 | break; | 405 | break; |
| 528 | 406 | ||
| 529 | old_object = reg->robj; | 407 | old_object = reg->bo; |
| 530 | if (old_object->pin_count == 0) | 408 | if (old_object->pin_count == 0) |
| 531 | steal = i; | 409 | steal = i; |
| 532 | } | 410 | } |
| @@ -537,91 +415,101 @@ int radeon_object_get_surface_reg(struct radeon_object *robj) | |||
| 537 | return -ENOMEM; | 415 | return -ENOMEM; |
| 538 | /* find someone with a surface reg and nuke their BO */ | 416 | /* find someone with a surface reg and nuke their BO */ |
| 539 | reg = &rdev->surface_regs[steal]; | 417 | reg = &rdev->surface_regs[steal]; |
| 540 | old_object = reg->robj; | 418 | old_object = reg->bo; |
| 541 | /* blow away the mapping */ | 419 | /* blow away the mapping */ |
| 542 | DRM_DEBUG("stealing surface reg %d from %p\n", steal, old_object); | 420 | DRM_DEBUG("stealing surface reg %d from %p\n", steal, old_object); |
| 543 | ttm_bo_unmap_virtual(&old_object->tobj); | 421 | ttm_bo_unmap_virtual(&old_object->tbo); |
| 544 | old_object->surface_reg = -1; | 422 | old_object->surface_reg = -1; |
| 545 | i = steal; | 423 | i = steal; |
| 546 | } | 424 | } |
| 547 | 425 | ||
| 548 | robj->surface_reg = i; | 426 | bo->surface_reg = i; |
| 549 | reg->robj = robj; | 427 | reg->bo = bo; |
| 550 | 428 | ||
| 551 | out: | 429 | out: |
| 552 | radeon_set_surface_reg(rdev, i, robj->tiling_flags, robj->pitch, | 430 | radeon_set_surface_reg(rdev, i, bo->tiling_flags, bo->pitch, |
| 553 | robj->tobj.mem.mm_node->start << PAGE_SHIFT, | 431 | bo->tbo.mem.mm_node->start << PAGE_SHIFT, |
| 554 | robj->tobj.num_pages << PAGE_SHIFT); | 432 | bo->tbo.num_pages << PAGE_SHIFT); |
| 555 | return 0; | 433 | return 0; |
| 556 | } | 434 | } |
| 557 | 435 | ||
| 558 | void radeon_object_clear_surface_reg(struct radeon_object *robj) | 436 | static void radeon_bo_clear_surface_reg(struct radeon_bo *bo) |
| 559 | { | 437 | { |
| 560 | struct radeon_device *rdev = robj->rdev; | 438 | struct radeon_device *rdev = bo->rdev; |
| 561 | struct radeon_surface_reg *reg; | 439 | struct radeon_surface_reg *reg; |
| 562 | 440 | ||
| 563 | if (robj->surface_reg == -1) | 441 | if (bo->surface_reg == -1) |
| 564 | return; | 442 | return; |
| 565 | 443 | ||
| 566 | reg = &rdev->surface_regs[robj->surface_reg]; | 444 | reg = &rdev->surface_regs[bo->surface_reg]; |
| 567 | radeon_clear_surface_reg(rdev, robj->surface_reg); | 445 | radeon_clear_surface_reg(rdev, bo->surface_reg); |
| 568 | 446 | ||
| 569 | reg->robj = NULL; | 447 | reg->bo = NULL; |
| 570 | robj->surface_reg = -1; | 448 | bo->surface_reg = -1; |
| 571 | } | 449 | } |
| 572 | 450 | ||
| 573 | void radeon_object_set_tiling_flags(struct radeon_object *robj, | 451 | int radeon_bo_set_tiling_flags(struct radeon_bo *bo, |
| 574 | uint32_t tiling_flags, uint32_t pitch) | 452 | uint32_t tiling_flags, uint32_t pitch) |
| 575 | { | 453 | { |
| 576 | robj->tiling_flags = tiling_flags; | 454 | int r; |
| 577 | robj->pitch = pitch; | 455 | |
| 456 | r = radeon_bo_reserve(bo, false); | ||
| 457 | if (unlikely(r != 0)) | ||
| 458 | return r; | ||
| 459 | bo->tiling_flags = tiling_flags; | ||
| 460 | bo->pitch = pitch; | ||
| 461 | radeon_bo_unreserve(bo); | ||
| 462 | return 0; | ||
| 578 | } | 463 | } |
| 579 | 464 | ||
| 580 | void radeon_object_get_tiling_flags(struct radeon_object *robj, | 465 | void radeon_bo_get_tiling_flags(struct radeon_bo *bo, |
| 581 | uint32_t *tiling_flags, | 466 | uint32_t *tiling_flags, |
| 582 | uint32_t *pitch) | 467 | uint32_t *pitch) |
| 583 | { | 468 | { |
| 469 | BUG_ON(!atomic_read(&bo->tbo.reserved)); | ||
| 584 | if (tiling_flags) | 470 | if (tiling_flags) |
| 585 | *tiling_flags = robj->tiling_flags; | 471 | *tiling_flags = bo->tiling_flags; |
| 586 | if (pitch) | 472 | if (pitch) |
| 587 | *pitch = robj->pitch; | 473 | *pitch = bo->pitch; |
| 588 | } | 474 | } |
| 589 | 475 | ||
| 590 | int radeon_object_check_tiling(struct radeon_object *robj, bool has_moved, | 476 | int radeon_bo_check_tiling(struct radeon_bo *bo, bool has_moved, |
| 591 | bool force_drop) | 477 | bool force_drop) |
| 592 | { | 478 | { |
| 593 | if (!(robj->tiling_flags & RADEON_TILING_SURFACE)) | 479 | BUG_ON(!atomic_read(&bo->tbo.reserved)); |
| 480 | |||
| 481 | if (!(bo->tiling_flags & RADEON_TILING_SURFACE)) | ||
| 594 | return 0; | 482 | return 0; |
| 595 | 483 | ||
| 596 | if (force_drop) { | 484 | if (force_drop) { |
| 597 | radeon_object_clear_surface_reg(robj); | 485 | radeon_bo_clear_surface_reg(bo); |
| 598 | return 0; | 486 | return 0; |
| 599 | } | 487 | } |
| 600 | 488 | ||
| 601 | if (robj->tobj.mem.mem_type != TTM_PL_VRAM) { | 489 | if (bo->tbo.mem.mem_type != TTM_PL_VRAM) { |
| 602 | if (!has_moved) | 490 | if (!has_moved) |
| 603 | return 0; | 491 | return 0; |
| 604 | 492 | ||
| 605 | if (robj->surface_reg >= 0) | 493 | if (bo->surface_reg >= 0) |
| 606 | radeon_object_clear_surface_reg(robj); | 494 | radeon_bo_clear_surface_reg(bo); |
| 607 | return 0; | 495 | return 0; |
| 608 | } | 496 | } |
| 609 | 497 | ||
| 610 | if ((robj->surface_reg >= 0) && !has_moved) | 498 | if ((bo->surface_reg >= 0) && !has_moved) |
| 611 | return 0; | 499 | return 0; |
| 612 | 500 | ||
| 613 | return radeon_object_get_surface_reg(robj); | 501 | return radeon_bo_get_surface_reg(bo); |
| 614 | } | 502 | } |
| 615 | 503 | ||
| 616 | void radeon_bo_move_notify(struct ttm_buffer_object *bo, | 504 | void radeon_bo_move_notify(struct ttm_buffer_object *bo, |
| 617 | struct ttm_mem_reg *mem) | 505 | struct ttm_mem_reg *mem) |
| 618 | { | 506 | { |
| 619 | struct radeon_object *robj = container_of(bo, struct radeon_object, tobj); | 507 | struct radeon_bo *rbo = container_of(bo, struct radeon_bo, tbo); |
| 620 | radeon_object_check_tiling(robj, 0, 1); | 508 | radeon_bo_check_tiling(rbo, 0, 1); |
| 621 | } | 509 | } |
| 622 | 510 | ||
| 623 | void radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo) | 511 | void radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo) |
| 624 | { | 512 | { |
| 625 | struct radeon_object *robj = container_of(bo, struct radeon_object, tobj); | 513 | struct radeon_bo *rbo = container_of(bo, struct radeon_bo, tbo); |
| 626 | radeon_object_check_tiling(robj, 0, 0); | 514 | radeon_bo_check_tiling(rbo, 0, 0); |
| 627 | } | 515 | } |
diff --git a/drivers/gpu/drm/radeon/radeon_object.h b/drivers/gpu/drm/radeon/radeon_object.h index 10e8af6bb456..e9da13077e2f 100644 --- a/drivers/gpu/drm/radeon/radeon_object.h +++ b/drivers/gpu/drm/radeon/radeon_object.h | |||
| @@ -28,19 +28,152 @@ | |||
| 28 | #ifndef __RADEON_OBJECT_H__ | 28 | #ifndef __RADEON_OBJECT_H__ |
| 29 | #define __RADEON_OBJECT_H__ | 29 | #define __RADEON_OBJECT_H__ |
| 30 | 30 | ||
| 31 | #include <ttm/ttm_bo_api.h> | 31 | #include <drm/radeon_drm.h> |
| 32 | #include <ttm/ttm_bo_driver.h> | 32 | #include "radeon.h" |
| 33 | #include <ttm/ttm_placement.h> | ||
| 34 | #include <ttm/ttm_module.h> | ||
| 35 | 33 | ||
| 36 | /* | 34 | /** |
| 37 | * TTM. | 35 | * radeon_mem_type_to_domain - return domain corresponding to mem_type |
| 36 | * @mem_type: ttm memory type | ||
| 37 | * | ||
| 38 | * Returns corresponding domain of the ttm mem_type | ||
| 39 | */ | ||
| 40 | static inline unsigned radeon_mem_type_to_domain(u32 mem_type) | ||
| 41 | { | ||
| 42 | switch (mem_type) { | ||
| 43 | case TTM_PL_VRAM: | ||
| 44 | return RADEON_GEM_DOMAIN_VRAM; | ||
| 45 | case TTM_PL_TT: | ||
| 46 | return RADEON_GEM_DOMAIN_GTT; | ||
| 47 | case TTM_PL_SYSTEM: | ||
| 48 | return RADEON_GEM_DOMAIN_CPU; | ||
| 49 | default: | ||
| 50 | break; | ||
| 51 | } | ||
| 52 | return 0; | ||
| 53 | } | ||
| 54 | |||
| 55 | /** | ||
| 56 | * radeon_bo_reserve - reserve bo | ||
| 57 | * @bo: bo structure | ||
| 58 | * @no_wait: don't sleep while trying to reserve (return -EBUSY) | ||
| 59 | * | ||
| 60 | * Returns: | ||
| 61 | * -EBUSY: buffer is busy and @no_wait is true | ||
| 62 | * -ERESTART: A wait for the buffer to become unreserved was interrupted by | ||
| 63 | * a signal. Release all buffer reservations and return to user-space. | ||
| 64 | */ | ||
| 65 | static inline int radeon_bo_reserve(struct radeon_bo *bo, bool no_wait) | ||
| 66 | { | ||
| 67 | int r; | ||
| 68 | |||
| 69 | retry: | ||
| 70 | r = ttm_bo_reserve(&bo->tbo, true, no_wait, false, 0); | ||
| 71 | if (unlikely(r != 0)) { | ||
| 72 | if (r == -ERESTART) | ||
| 73 | goto retry; | ||
| 74 | dev_err(bo->rdev->dev, "%p reserve failed\n", bo); | ||
| 75 | return r; | ||
| 76 | } | ||
| 77 | return 0; | ||
| 78 | } | ||
| 79 | |||
| 80 | static inline void radeon_bo_unreserve(struct radeon_bo *bo) | ||
| 81 | { | ||
| 82 | ttm_bo_unreserve(&bo->tbo); | ||
| 83 | } | ||
| 84 | |||
| 85 | /** | ||
| 86 | * radeon_bo_gpu_offset - return GPU offset of bo | ||
| 87 | * @bo: radeon object for which we query the offset | ||
| 88 | * | ||
| 89 | * Returns current GPU offset of the object. | ||
| 90 | * | ||
| 91 | * Note: object should either be pinned or reserved when calling this | ||
| 92 | * function, it might be usefull to add check for this for debugging. | ||
| 93 | */ | ||
| 94 | static inline u64 radeon_bo_gpu_offset(struct radeon_bo *bo) | ||
| 95 | { | ||
| 96 | return bo->tbo.offset; | ||
| 97 | } | ||
| 98 | |||
| 99 | static inline unsigned long radeon_bo_size(struct radeon_bo *bo) | ||
| 100 | { | ||
| 101 | return bo->tbo.num_pages << PAGE_SHIFT; | ||
| 102 | } | ||
| 103 | |||
| 104 | static inline bool radeon_bo_is_reserved(struct radeon_bo *bo) | ||
| 105 | { | ||
| 106 | return !!atomic_read(&bo->tbo.reserved); | ||
| 107 | } | ||
| 108 | |||
| 109 | /** | ||
| 110 | * radeon_bo_mmap_offset - return mmap offset of bo | ||
| 111 | * @bo: radeon object for which we query the offset | ||
| 112 | * | ||
| 113 | * Returns mmap offset of the object. | ||
| 114 | * | ||
| 115 | * Note: addr_space_offset is constant after ttm bo init thus isn't protected | ||
| 116 | * by any lock. | ||
| 38 | */ | 117 | */ |
| 39 | struct radeon_mman { | 118 | static inline u64 radeon_bo_mmap_offset(struct radeon_bo *bo) |
| 40 | struct ttm_bo_global_ref bo_global_ref; | 119 | { |
| 41 | struct ttm_global_reference mem_global_ref; | 120 | return bo->tbo.addr_space_offset; |
| 42 | bool mem_global_referenced; | 121 | } |
| 43 | struct ttm_bo_device bdev; | 122 | |
| 44 | }; | 123 | static inline int radeon_bo_wait(struct radeon_bo *bo, u32 *mem_type, |
| 124 | bool no_wait) | ||
| 125 | { | ||
| 126 | int r; | ||
| 127 | |||
| 128 | retry: | ||
| 129 | r = ttm_bo_reserve(&bo->tbo, true, no_wait, false, 0); | ||
| 130 | if (unlikely(r != 0)) { | ||
| 131 | if (r == -ERESTART) | ||
| 132 | goto retry; | ||
| 133 | dev_err(bo->rdev->dev, "%p reserve failed for wait\n", bo); | ||
| 134 | return r; | ||
| 135 | } | ||
| 136 | spin_lock(&bo->tbo.lock); | ||
| 137 | if (mem_type) | ||
| 138 | *mem_type = bo->tbo.mem.mem_type; | ||
| 139 | if (bo->tbo.sync_obj) | ||
| 140 | r = ttm_bo_wait(&bo->tbo, true, true, no_wait); | ||
| 141 | spin_unlock(&bo->tbo.lock); | ||
| 142 | ttm_bo_unreserve(&bo->tbo); | ||
| 143 | if (unlikely(r == -ERESTART)) | ||
| 144 | goto retry; | ||
| 145 | return r; | ||
| 146 | } | ||
| 147 | |||
| 148 | extern int radeon_bo_create(struct radeon_device *rdev, | ||
| 149 | struct drm_gem_object *gobj, unsigned long size, | ||
| 150 | bool kernel, u32 domain, | ||
| 151 | struct radeon_bo **bo_ptr); | ||
| 152 | extern int radeon_bo_kmap(struct radeon_bo *bo, void **ptr); | ||
| 153 | extern void radeon_bo_kunmap(struct radeon_bo *bo); | ||
| 154 | extern void radeon_bo_unref(struct radeon_bo **bo); | ||
| 155 | extern int radeon_bo_pin(struct radeon_bo *bo, u32 domain, u64 *gpu_addr); | ||
| 156 | extern int radeon_bo_unpin(struct radeon_bo *bo); | ||
| 157 | extern int radeon_bo_evict_vram(struct radeon_device *rdev); | ||
| 158 | extern void radeon_bo_force_delete(struct radeon_device *rdev); | ||
| 159 | extern int radeon_bo_init(struct radeon_device *rdev); | ||
| 160 | extern void radeon_bo_fini(struct radeon_device *rdev); | ||
| 161 | extern void radeon_bo_list_add_object(struct radeon_bo_list *lobj, | ||
| 162 | struct list_head *head); | ||
| 163 | extern int radeon_bo_list_reserve(struct list_head *head); | ||
| 164 | extern void radeon_bo_list_unreserve(struct list_head *head); | ||
| 165 | extern int radeon_bo_list_validate(struct list_head *head, void *fence); | ||
| 166 | extern void radeon_bo_list_unvalidate(struct list_head *head, void *fence); | ||
| 167 | extern int radeon_bo_fbdev_mmap(struct radeon_bo *bo, | ||
| 168 | struct vm_area_struct *vma); | ||
| 169 | extern int radeon_bo_set_tiling_flags(struct radeon_bo *bo, | ||
| 170 | u32 tiling_flags, u32 pitch); | ||
| 171 | extern void radeon_bo_get_tiling_flags(struct radeon_bo *bo, | ||
| 172 | u32 *tiling_flags, u32 *pitch); | ||
| 173 | extern int radeon_bo_check_tiling(struct radeon_bo *bo, bool has_moved, | ||
| 174 | bool force_drop); | ||
| 175 | extern void radeon_bo_move_notify(struct ttm_buffer_object *bo, | ||
| 176 | struct ttm_mem_reg *mem); | ||
| 177 | extern void radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo); | ||
| 45 | 178 | ||
| 46 | #endif | 179 | #endif |
diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c index 46146c6a2a06..34b08d307c81 100644 --- a/drivers/gpu/drm/radeon/radeon_pm.c +++ b/drivers/gpu/drm/radeon/radeon_pm.c | |||
| @@ -27,7 +27,7 @@ int radeon_debugfs_pm_init(struct radeon_device *rdev); | |||
| 27 | int radeon_pm_init(struct radeon_device *rdev) | 27 | int radeon_pm_init(struct radeon_device *rdev) |
| 28 | { | 28 | { |
| 29 | if (radeon_debugfs_pm_init(rdev)) { | 29 | if (radeon_debugfs_pm_init(rdev)) { |
| 30 | DRM_ERROR("Failed to register debugfs file for CP !\n"); | 30 | DRM_ERROR("Failed to register debugfs file for PM!\n"); |
| 31 | } | 31 | } |
| 32 | 32 | ||
| 33 | return 0; | 33 | return 0; |
| @@ -44,8 +44,8 @@ static int radeon_debugfs_pm_info(struct seq_file *m, void *data) | |||
| 44 | struct drm_device *dev = node->minor->dev; | 44 | struct drm_device *dev = node->minor->dev; |
| 45 | struct radeon_device *rdev = dev->dev_private; | 45 | struct radeon_device *rdev = dev->dev_private; |
| 46 | 46 | ||
| 47 | seq_printf(m, "engine clock: %u0 Hz\n", radeon_get_engine_clock(rdev)); | 47 | seq_printf(m, "engine clock: %u0 kHz\n", radeon_get_engine_clock(rdev)); |
| 48 | seq_printf(m, "memory clock: %u0 Hz\n", radeon_get_memory_clock(rdev)); | 48 | seq_printf(m, "memory clock: %u0 kHz\n", radeon_get_memory_clock(rdev)); |
| 49 | 49 | ||
| 50 | return 0; | 50 | return 0; |
| 51 | } | 51 | } |
diff --git a/drivers/gpu/drm/radeon/radeon_reg.h b/drivers/gpu/drm/radeon/radeon_reg.h index 29ab75903ec1..c4c41c8d908c 100644 --- a/drivers/gpu/drm/radeon/radeon_reg.h +++ b/drivers/gpu/drm/radeon/radeon_reg.h | |||
| @@ -1051,20 +1051,25 @@ | |||
| 1051 | 1051 | ||
| 1052 | /* Multimedia I2C bus */ | 1052 | /* Multimedia I2C bus */ |
| 1053 | #define RADEON_I2C_CNTL_0 0x0090 | 1053 | #define RADEON_I2C_CNTL_0 0x0090 |
| 1054 | #define RADEON_I2C_DONE (1<<0) | 1054 | #define RADEON_I2C_DONE (1 << 0) |
| 1055 | #define RADEON_I2C_NACK (1<<1) | 1055 | #define RADEON_I2C_NACK (1 << 1) |
| 1056 | #define RADEON_I2C_HALT (1<<2) | 1056 | #define RADEON_I2C_HALT (1 << 2) |
| 1057 | #define RADEON_I2C_SOFT_RST (1<<5) | 1057 | #define RADEON_I2C_SOFT_RST (1 << 5) |
| 1058 | #define RADEON_I2C_DRIVE_EN (1<<6) | 1058 | #define RADEON_I2C_DRIVE_EN (1 << 6) |
| 1059 | #define RADEON_I2C_DRIVE_SEL (1<<7) | 1059 | #define RADEON_I2C_DRIVE_SEL (1 << 7) |
| 1060 | #define RADEON_I2C_START (1<<8) | 1060 | #define RADEON_I2C_START (1 << 8) |
| 1061 | #define RADEON_I2C_STOP (1<<9) | 1061 | #define RADEON_I2C_STOP (1 << 9) |
| 1062 | #define RADEON_I2C_RECEIVE (1<<10) | 1062 | #define RADEON_I2C_RECEIVE (1 << 10) |
| 1063 | #define RADEON_I2C_ABORT (1<<11) | 1063 | #define RADEON_I2C_ABORT (1 << 11) |
| 1064 | #define RADEON_I2C_GO (1<<12) | 1064 | #define RADEON_I2C_GO (1 << 12) |
| 1065 | #define RADEON_I2C_PRESCALE_SHIFT 16 | ||
| 1065 | #define RADEON_I2C_CNTL_1 0x0094 | 1066 | #define RADEON_I2C_CNTL_1 0x0094 |
| 1066 | #define RADEON_I2C_SEL (1<<16) | 1067 | #define RADEON_I2C_DATA_COUNT_SHIFT 0 |
| 1067 | #define RADEON_I2C_EN (1<<17) | 1068 | #define RADEON_I2C_ADDR_COUNT_SHIFT 4 |
| 1069 | #define RADEON_I2C_INTRA_BYTE_DELAY_SHIFT 8 | ||
| 1070 | #define RADEON_I2C_SEL (1 << 16) | ||
| 1071 | #define RADEON_I2C_EN (1 << 17) | ||
| 1072 | #define RADEON_I2C_TIME_LIMIT_SHIFT 24 | ||
| 1068 | #define RADEON_I2C_DATA 0x0098 | 1073 | #define RADEON_I2C_DATA 0x0098 |
| 1069 | 1074 | ||
| 1070 | #define RADEON_DVI_I2C_CNTL_0 0x02e0 | 1075 | #define RADEON_DVI_I2C_CNTL_0 0x02e0 |
| @@ -1072,7 +1077,7 @@ | |||
| 1072 | # define R200_SEL_DDC1 0 /* 0x60 - VGA_DDC */ | 1077 | # define R200_SEL_DDC1 0 /* 0x60 - VGA_DDC */ |
| 1073 | # define R200_SEL_DDC2 1 /* 0x64 - DVI_DDC */ | 1078 | # define R200_SEL_DDC2 1 /* 0x64 - DVI_DDC */ |
| 1074 | # define R200_SEL_DDC3 2 /* 0x68 - MONID_DDC */ | 1079 | # define R200_SEL_DDC3 2 /* 0x68 - MONID_DDC */ |
| 1075 | #define RADEON_DVI_I2C_CNTL_1 0x02e4 /* ? */ | 1080 | #define RADEON_DVI_I2C_CNTL_1 0x02e4 |
| 1076 | #define RADEON_DVI_I2C_DATA 0x02e8 | 1081 | #define RADEON_DVI_I2C_DATA 0x02e8 |
| 1077 | 1082 | ||
| 1078 | #define RADEON_INTERRUPT_LINE 0x0f3c /* PCI */ | 1083 | #define RADEON_INTERRUPT_LINE 0x0f3c /* PCI */ |
| @@ -1143,14 +1148,15 @@ | |||
| 1143 | # define RADEON_IO_MCLK_MAX_DYN_STOP_LAT (1 << 13) | 1148 | # define RADEON_IO_MCLK_MAX_DYN_STOP_LAT (1 << 13) |
| 1144 | # define RADEON_MC_MCLK_DYN_ENABLE (1 << 14) | 1149 | # define RADEON_MC_MCLK_DYN_ENABLE (1 << 14) |
| 1145 | # define RADEON_IO_MCLK_DYN_ENABLE (1 << 15) | 1150 | # define RADEON_IO_MCLK_DYN_ENABLE (1 << 15) |
| 1146 | #define RADEON_LCD_GPIO_MASK 0x01a0 | 1151 | #define RADEON_GPIOPAD_MASK 0x0198 |
| 1152 | #define RADEON_GPIOPAD_A 0x019c | ||
| 1147 | #define RADEON_GPIOPAD_EN 0x01a0 | 1153 | #define RADEON_GPIOPAD_EN 0x01a0 |
| 1154 | #define RADEON_GPIOPAD_Y 0x01a4 | ||
| 1155 | #define RADEON_LCD_GPIO_MASK 0x01a0 | ||
| 1148 | #define RADEON_LCD_GPIO_Y_REG 0x01a4 | 1156 | #define RADEON_LCD_GPIO_Y_REG 0x01a4 |
| 1149 | #define RADEON_MDGPIO_A_REG 0x01ac | 1157 | #define RADEON_MDGPIO_A_REG 0x01ac |
| 1150 | #define RADEON_MDGPIO_EN_REG 0x01b0 | 1158 | #define RADEON_MDGPIO_EN_REG 0x01b0 |
| 1151 | #define RADEON_MDGPIO_MASK 0x0198 | 1159 | #define RADEON_MDGPIO_MASK 0x0198 |
| 1152 | #define RADEON_GPIOPAD_MASK 0x0198 | ||
| 1153 | #define RADEON_GPIOPAD_A 0x019c | ||
| 1154 | #define RADEON_MDGPIO_Y_REG 0x01b4 | 1160 | #define RADEON_MDGPIO_Y_REG 0x01b4 |
| 1155 | #define RADEON_MEM_ADDR_CONFIG 0x0148 | 1161 | #define RADEON_MEM_ADDR_CONFIG 0x0148 |
| 1156 | #define RADEON_MEM_BASE 0x0f10 /* PCI */ | 1162 | #define RADEON_MEM_BASE 0x0f10 /* PCI */ |
| @@ -1360,6 +1366,9 @@ | |||
| 1360 | #define RADEON_OVR_CLR 0x0230 | 1366 | #define RADEON_OVR_CLR 0x0230 |
| 1361 | #define RADEON_OVR_WID_LEFT_RIGHT 0x0234 | 1367 | #define RADEON_OVR_WID_LEFT_RIGHT 0x0234 |
| 1362 | #define RADEON_OVR_WID_TOP_BOTTOM 0x0238 | 1368 | #define RADEON_OVR_WID_TOP_BOTTOM 0x0238 |
| 1369 | #define RADEON_OVR2_CLR 0x0330 | ||
| 1370 | #define RADEON_OVR2_WID_LEFT_RIGHT 0x0334 | ||
| 1371 | #define RADEON_OVR2_WID_TOP_BOTTOM 0x0338 | ||
| 1363 | 1372 | ||
| 1364 | /* first capture unit */ | 1373 | /* first capture unit */ |
| 1365 | 1374 | ||
diff --git a/drivers/gpu/drm/radeon/radeon_ring.c b/drivers/gpu/drm/radeon/radeon_ring.c index 747b4bffb84b..4d12b2d17b4d 100644 --- a/drivers/gpu/drm/radeon/radeon_ring.c +++ b/drivers/gpu/drm/radeon/radeon_ring.c | |||
| @@ -165,19 +165,24 @@ int radeon_ib_pool_init(struct radeon_device *rdev) | |||
| 165 | return 0; | 165 | return 0; |
| 166 | /* Allocate 1M object buffer */ | 166 | /* Allocate 1M object buffer */ |
| 167 | INIT_LIST_HEAD(&rdev->ib_pool.scheduled_ibs); | 167 | INIT_LIST_HEAD(&rdev->ib_pool.scheduled_ibs); |
| 168 | r = radeon_object_create(rdev, NULL, RADEON_IB_POOL_SIZE*64*1024, | 168 | r = radeon_bo_create(rdev, NULL, RADEON_IB_POOL_SIZE*64*1024, |
| 169 | true, RADEON_GEM_DOMAIN_GTT, | 169 | true, RADEON_GEM_DOMAIN_GTT, |
| 170 | false, &rdev->ib_pool.robj); | 170 | &rdev->ib_pool.robj); |
| 171 | if (r) { | 171 | if (r) { |
| 172 | DRM_ERROR("radeon: failed to ib pool (%d).\n", r); | 172 | DRM_ERROR("radeon: failed to ib pool (%d).\n", r); |
| 173 | return r; | 173 | return r; |
| 174 | } | 174 | } |
| 175 | r = radeon_object_pin(rdev->ib_pool.robj, RADEON_GEM_DOMAIN_GTT, &gpu_addr); | 175 | r = radeon_bo_reserve(rdev->ib_pool.robj, false); |
| 176 | if (unlikely(r != 0)) | ||
| 177 | return r; | ||
| 178 | r = radeon_bo_pin(rdev->ib_pool.robj, RADEON_GEM_DOMAIN_GTT, &gpu_addr); | ||
| 176 | if (r) { | 179 | if (r) { |
| 180 | radeon_bo_unreserve(rdev->ib_pool.robj); | ||
| 177 | DRM_ERROR("radeon: failed to pin ib pool (%d).\n", r); | 181 | DRM_ERROR("radeon: failed to pin ib pool (%d).\n", r); |
| 178 | return r; | 182 | return r; |
| 179 | } | 183 | } |
| 180 | r = radeon_object_kmap(rdev->ib_pool.robj, &ptr); | 184 | r = radeon_bo_kmap(rdev->ib_pool.robj, &ptr); |
| 185 | radeon_bo_unreserve(rdev->ib_pool.robj); | ||
| 181 | if (r) { | 186 | if (r) { |
| 182 | DRM_ERROR("radeon: failed to map ib poll (%d).\n", r); | 187 | DRM_ERROR("radeon: failed to map ib poll (%d).\n", r); |
| 183 | return r; | 188 | return r; |
| @@ -203,14 +208,21 @@ int radeon_ib_pool_init(struct radeon_device *rdev) | |||
| 203 | 208 | ||
| 204 | void radeon_ib_pool_fini(struct radeon_device *rdev) | 209 | void radeon_ib_pool_fini(struct radeon_device *rdev) |
| 205 | { | 210 | { |
| 211 | int r; | ||
| 212 | |||
| 206 | if (!rdev->ib_pool.ready) { | 213 | if (!rdev->ib_pool.ready) { |
| 207 | return; | 214 | return; |
| 208 | } | 215 | } |
| 209 | mutex_lock(&rdev->ib_pool.mutex); | 216 | mutex_lock(&rdev->ib_pool.mutex); |
| 210 | bitmap_zero(rdev->ib_pool.alloc_bm, RADEON_IB_POOL_SIZE); | 217 | bitmap_zero(rdev->ib_pool.alloc_bm, RADEON_IB_POOL_SIZE); |
| 211 | if (rdev->ib_pool.robj) { | 218 | if (rdev->ib_pool.robj) { |
| 212 | radeon_object_kunmap(rdev->ib_pool.robj); | 219 | r = radeon_bo_reserve(rdev->ib_pool.robj, false); |
| 213 | radeon_object_unref(&rdev->ib_pool.robj); | 220 | if (likely(r == 0)) { |
| 221 | radeon_bo_kunmap(rdev->ib_pool.robj); | ||
| 222 | radeon_bo_unpin(rdev->ib_pool.robj); | ||
| 223 | radeon_bo_unreserve(rdev->ib_pool.robj); | ||
| 224 | } | ||
| 225 | radeon_bo_unref(&rdev->ib_pool.robj); | ||
| 214 | rdev->ib_pool.robj = NULL; | 226 | rdev->ib_pool.robj = NULL; |
| 215 | } | 227 | } |
| 216 | mutex_unlock(&rdev->ib_pool.mutex); | 228 | mutex_unlock(&rdev->ib_pool.mutex); |
| @@ -288,29 +300,28 @@ int radeon_ring_init(struct radeon_device *rdev, unsigned ring_size) | |||
| 288 | rdev->cp.ring_size = ring_size; | 300 | rdev->cp.ring_size = ring_size; |
| 289 | /* Allocate ring buffer */ | 301 | /* Allocate ring buffer */ |
| 290 | if (rdev->cp.ring_obj == NULL) { | 302 | if (rdev->cp.ring_obj == NULL) { |
| 291 | r = radeon_object_create(rdev, NULL, rdev->cp.ring_size, | 303 | r = radeon_bo_create(rdev, NULL, rdev->cp.ring_size, true, |
| 292 | true, | 304 | RADEON_GEM_DOMAIN_GTT, |
| 293 | RADEON_GEM_DOMAIN_GTT, | 305 | &rdev->cp.ring_obj); |
| 294 | false, | ||
| 295 | &rdev->cp.ring_obj); | ||
| 296 | if (r) { | 306 | if (r) { |
| 297 | DRM_ERROR("radeon: failed to create ring buffer (%d).\n", r); | 307 | dev_err(rdev->dev, "(%d) ring create failed\n", r); |
| 298 | mutex_unlock(&rdev->cp.mutex); | ||
| 299 | return r; | 308 | return r; |
| 300 | } | 309 | } |
| 301 | r = radeon_object_pin(rdev->cp.ring_obj, | 310 | r = radeon_bo_reserve(rdev->cp.ring_obj, false); |
| 302 | RADEON_GEM_DOMAIN_GTT, | 311 | if (unlikely(r != 0)) |
| 303 | &rdev->cp.gpu_addr); | 312 | return r; |
| 313 | r = radeon_bo_pin(rdev->cp.ring_obj, RADEON_GEM_DOMAIN_GTT, | ||
| 314 | &rdev->cp.gpu_addr); | ||
| 304 | if (r) { | 315 | if (r) { |
| 305 | DRM_ERROR("radeon: failed to pin ring buffer (%d).\n", r); | 316 | radeon_bo_unreserve(rdev->cp.ring_obj); |
| 306 | mutex_unlock(&rdev->cp.mutex); | 317 | dev_err(rdev->dev, "(%d) ring pin failed\n", r); |
| 307 | return r; | 318 | return r; |
| 308 | } | 319 | } |
| 309 | r = radeon_object_kmap(rdev->cp.ring_obj, | 320 | r = radeon_bo_kmap(rdev->cp.ring_obj, |
| 310 | (void **)&rdev->cp.ring); | 321 | (void **)&rdev->cp.ring); |
| 322 | radeon_bo_unreserve(rdev->cp.ring_obj); | ||
| 311 | if (r) { | 323 | if (r) { |
| 312 | DRM_ERROR("radeon: failed to map ring buffer (%d).\n", r); | 324 | dev_err(rdev->dev, "(%d) ring map failed\n", r); |
| 313 | mutex_unlock(&rdev->cp.mutex); | ||
| 314 | return r; | 325 | return r; |
| 315 | } | 326 | } |
| 316 | } | 327 | } |
| @@ -321,11 +332,17 @@ int radeon_ring_init(struct radeon_device *rdev, unsigned ring_size) | |||
| 321 | 332 | ||
| 322 | void radeon_ring_fini(struct radeon_device *rdev) | 333 | void radeon_ring_fini(struct radeon_device *rdev) |
| 323 | { | 334 | { |
| 335 | int r; | ||
| 336 | |||
| 324 | mutex_lock(&rdev->cp.mutex); | 337 | mutex_lock(&rdev->cp.mutex); |
| 325 | if (rdev->cp.ring_obj) { | 338 | if (rdev->cp.ring_obj) { |
| 326 | radeon_object_kunmap(rdev->cp.ring_obj); | 339 | r = radeon_bo_reserve(rdev->cp.ring_obj, false); |
| 327 | radeon_object_unpin(rdev->cp.ring_obj); | 340 | if (likely(r == 0)) { |
| 328 | radeon_object_unref(&rdev->cp.ring_obj); | 341 | radeon_bo_kunmap(rdev->cp.ring_obj); |
| 342 | radeon_bo_unpin(rdev->cp.ring_obj); | ||
| 343 | radeon_bo_unreserve(rdev->cp.ring_obj); | ||
| 344 | } | ||
| 345 | radeon_bo_unref(&rdev->cp.ring_obj); | ||
| 329 | rdev->cp.ring = NULL; | 346 | rdev->cp.ring = NULL; |
| 330 | rdev->cp.ring_obj = NULL; | 347 | rdev->cp.ring_obj = NULL; |
| 331 | } | 348 | } |
diff --git a/drivers/gpu/drm/radeon/radeon_test.c b/drivers/gpu/drm/radeon/radeon_test.c index f8a465d9a1cf..391c973ec4db 100644 --- a/drivers/gpu/drm/radeon/radeon_test.c +++ b/drivers/gpu/drm/radeon/radeon_test.c | |||
| @@ -30,8 +30,8 @@ | |||
| 30 | /* Test BO GTT->VRAM and VRAM->GTT GPU copies across the whole GTT aperture */ | 30 | /* Test BO GTT->VRAM and VRAM->GTT GPU copies across the whole GTT aperture */ |
| 31 | void radeon_test_moves(struct radeon_device *rdev) | 31 | void radeon_test_moves(struct radeon_device *rdev) |
| 32 | { | 32 | { |
| 33 | struct radeon_object *vram_obj = NULL; | 33 | struct radeon_bo *vram_obj = NULL; |
| 34 | struct radeon_object **gtt_obj = NULL; | 34 | struct radeon_bo **gtt_obj = NULL; |
| 35 | struct radeon_fence *fence = NULL; | 35 | struct radeon_fence *fence = NULL; |
| 36 | uint64_t gtt_addr, vram_addr; | 36 | uint64_t gtt_addr, vram_addr; |
| 37 | unsigned i, n, size; | 37 | unsigned i, n, size; |
| @@ -52,38 +52,42 @@ void radeon_test_moves(struct radeon_device *rdev) | |||
| 52 | goto out_cleanup; | 52 | goto out_cleanup; |
| 53 | } | 53 | } |
| 54 | 54 | ||
| 55 | r = radeon_object_create(rdev, NULL, size, true, RADEON_GEM_DOMAIN_VRAM, | 55 | r = radeon_bo_create(rdev, NULL, size, true, RADEON_GEM_DOMAIN_VRAM, |
| 56 | false, &vram_obj); | 56 | &vram_obj); |
| 57 | if (r) { | 57 | if (r) { |
| 58 | DRM_ERROR("Failed to create VRAM object\n"); | 58 | DRM_ERROR("Failed to create VRAM object\n"); |
| 59 | goto out_cleanup; | 59 | goto out_cleanup; |
| 60 | } | 60 | } |
| 61 | 61 | r = radeon_bo_reserve(vram_obj, false); | |
| 62 | r = radeon_object_pin(vram_obj, RADEON_GEM_DOMAIN_VRAM, &vram_addr); | 62 | if (unlikely(r != 0)) |
| 63 | goto out_cleanup; | ||
| 64 | r = radeon_bo_pin(vram_obj, RADEON_GEM_DOMAIN_VRAM, &vram_addr); | ||
| 63 | if (r) { | 65 | if (r) { |
| 64 | DRM_ERROR("Failed to pin VRAM object\n"); | 66 | DRM_ERROR("Failed to pin VRAM object\n"); |
| 65 | goto out_cleanup; | 67 | goto out_cleanup; |
| 66 | } | 68 | } |
| 67 | |||
| 68 | for (i = 0; i < n; i++) { | 69 | for (i = 0; i < n; i++) { |
| 69 | void *gtt_map, *vram_map; | 70 | void *gtt_map, *vram_map; |
| 70 | void **gtt_start, **gtt_end; | 71 | void **gtt_start, **gtt_end; |
| 71 | void **vram_start, **vram_end; | 72 | void **vram_start, **vram_end; |
| 72 | 73 | ||
| 73 | r = radeon_object_create(rdev, NULL, size, true, | 74 | r = radeon_bo_create(rdev, NULL, size, true, |
| 74 | RADEON_GEM_DOMAIN_GTT, false, gtt_obj + i); | 75 | RADEON_GEM_DOMAIN_GTT, gtt_obj + i); |
| 75 | if (r) { | 76 | if (r) { |
| 76 | DRM_ERROR("Failed to create GTT object %d\n", i); | 77 | DRM_ERROR("Failed to create GTT object %d\n", i); |
| 77 | goto out_cleanup; | 78 | goto out_cleanup; |
| 78 | } | 79 | } |
| 79 | 80 | ||
| 80 | r = radeon_object_pin(gtt_obj[i], RADEON_GEM_DOMAIN_GTT, >t_addr); | 81 | r = radeon_bo_reserve(gtt_obj[i], false); |
| 82 | if (unlikely(r != 0)) | ||
| 83 | goto out_cleanup; | ||
| 84 | r = radeon_bo_pin(gtt_obj[i], RADEON_GEM_DOMAIN_GTT, >t_addr); | ||
| 81 | if (r) { | 85 | if (r) { |
| 82 | DRM_ERROR("Failed to pin GTT object %d\n", i); | 86 | DRM_ERROR("Failed to pin GTT object %d\n", i); |
| 83 | goto out_cleanup; | 87 | goto out_cleanup; |
| 84 | } | 88 | } |
| 85 | 89 | ||
| 86 | r = radeon_object_kmap(gtt_obj[i], >t_map); | 90 | r = radeon_bo_kmap(gtt_obj[i], >t_map); |
| 87 | if (r) { | 91 | if (r) { |
| 88 | DRM_ERROR("Failed to map GTT object %d\n", i); | 92 | DRM_ERROR("Failed to map GTT object %d\n", i); |
| 89 | goto out_cleanup; | 93 | goto out_cleanup; |
| @@ -94,7 +98,7 @@ void radeon_test_moves(struct radeon_device *rdev) | |||
| 94 | gtt_start++) | 98 | gtt_start++) |
| 95 | *gtt_start = gtt_start; | 99 | *gtt_start = gtt_start; |
| 96 | 100 | ||
| 97 | radeon_object_kunmap(gtt_obj[i]); | 101 | radeon_bo_kunmap(gtt_obj[i]); |
| 98 | 102 | ||
| 99 | r = radeon_fence_create(rdev, &fence); | 103 | r = radeon_fence_create(rdev, &fence); |
| 100 | if (r) { | 104 | if (r) { |
| @@ -116,7 +120,7 @@ void radeon_test_moves(struct radeon_device *rdev) | |||
| 116 | 120 | ||
| 117 | radeon_fence_unref(&fence); | 121 | radeon_fence_unref(&fence); |
| 118 | 122 | ||
| 119 | r = radeon_object_kmap(vram_obj, &vram_map); | 123 | r = radeon_bo_kmap(vram_obj, &vram_map); |
| 120 | if (r) { | 124 | if (r) { |
| 121 | DRM_ERROR("Failed to map VRAM object after copy %d\n", i); | 125 | DRM_ERROR("Failed to map VRAM object after copy %d\n", i); |
| 122 | goto out_cleanup; | 126 | goto out_cleanup; |
| @@ -131,13 +135,13 @@ void radeon_test_moves(struct radeon_device *rdev) | |||
| 131 | "expected 0x%p (GTT map 0x%p-0x%p)\n", | 135 | "expected 0x%p (GTT map 0x%p-0x%p)\n", |
| 132 | i, *vram_start, gtt_start, gtt_map, | 136 | i, *vram_start, gtt_start, gtt_map, |
| 133 | gtt_end); | 137 | gtt_end); |
| 134 | radeon_object_kunmap(vram_obj); | 138 | radeon_bo_kunmap(vram_obj); |
| 135 | goto out_cleanup; | 139 | goto out_cleanup; |
| 136 | } | 140 | } |
| 137 | *vram_start = vram_start; | 141 | *vram_start = vram_start; |
| 138 | } | 142 | } |
| 139 | 143 | ||
| 140 | radeon_object_kunmap(vram_obj); | 144 | radeon_bo_kunmap(vram_obj); |
| 141 | 145 | ||
| 142 | r = radeon_fence_create(rdev, &fence); | 146 | r = radeon_fence_create(rdev, &fence); |
| 143 | if (r) { | 147 | if (r) { |
| @@ -159,7 +163,7 @@ void radeon_test_moves(struct radeon_device *rdev) | |||
| 159 | 163 | ||
| 160 | radeon_fence_unref(&fence); | 164 | radeon_fence_unref(&fence); |
| 161 | 165 | ||
| 162 | r = radeon_object_kmap(gtt_obj[i], >t_map); | 166 | r = radeon_bo_kmap(gtt_obj[i], >t_map); |
| 163 | if (r) { | 167 | if (r) { |
| 164 | DRM_ERROR("Failed to map GTT object after copy %d\n", i); | 168 | DRM_ERROR("Failed to map GTT object after copy %d\n", i); |
| 165 | goto out_cleanup; | 169 | goto out_cleanup; |
| @@ -174,12 +178,12 @@ void radeon_test_moves(struct radeon_device *rdev) | |||
| 174 | "expected 0x%p (VRAM map 0x%p-0x%p)\n", | 178 | "expected 0x%p (VRAM map 0x%p-0x%p)\n", |
| 175 | i, *gtt_start, vram_start, vram_map, | 179 | i, *gtt_start, vram_start, vram_map, |
| 176 | vram_end); | 180 | vram_end); |
| 177 | radeon_object_kunmap(gtt_obj[i]); | 181 | radeon_bo_kunmap(gtt_obj[i]); |
| 178 | goto out_cleanup; | 182 | goto out_cleanup; |
| 179 | } | 183 | } |
| 180 | } | 184 | } |
| 181 | 185 | ||
| 182 | radeon_object_kunmap(gtt_obj[i]); | 186 | radeon_bo_kunmap(gtt_obj[i]); |
| 183 | 187 | ||
| 184 | DRM_INFO("Tested GTT->VRAM and VRAM->GTT copy for GTT offset 0x%llx\n", | 188 | DRM_INFO("Tested GTT->VRAM and VRAM->GTT copy for GTT offset 0x%llx\n", |
| 185 | gtt_addr - rdev->mc.gtt_location); | 189 | gtt_addr - rdev->mc.gtt_location); |
| @@ -187,14 +191,20 @@ void radeon_test_moves(struct radeon_device *rdev) | |||
| 187 | 191 | ||
| 188 | out_cleanup: | 192 | out_cleanup: |
| 189 | if (vram_obj) { | 193 | if (vram_obj) { |
| 190 | radeon_object_unpin(vram_obj); | 194 | if (radeon_bo_is_reserved(vram_obj)) { |
| 191 | radeon_object_unref(&vram_obj); | 195 | radeon_bo_unpin(vram_obj); |
| 196 | radeon_bo_unreserve(vram_obj); | ||
| 197 | } | ||
| 198 | radeon_bo_unref(&vram_obj); | ||
| 192 | } | 199 | } |
| 193 | if (gtt_obj) { | 200 | if (gtt_obj) { |
| 194 | for (i = 0; i < n; i++) { | 201 | for (i = 0; i < n; i++) { |
| 195 | if (gtt_obj[i]) { | 202 | if (gtt_obj[i]) { |
| 196 | radeon_object_unpin(gtt_obj[i]); | 203 | if (radeon_bo_is_reserved(gtt_obj[i])) { |
| 197 | radeon_object_unref(>t_obj[i]); | 204 | radeon_bo_unpin(gtt_obj[i]); |
| 205 | radeon_bo_unreserve(gtt_obj[i]); | ||
| 206 | } | ||
| 207 | radeon_bo_unref(>t_obj[i]); | ||
| 198 | } | 208 | } |
| 199 | } | 209 | } |
| 200 | kfree(gtt_obj); | 210 | kfree(gtt_obj); |
| @@ -206,4 +216,3 @@ out_cleanup: | |||
| 206 | printk(KERN_WARNING "Error while testing BO move.\n"); | 216 | printk(KERN_WARNING "Error while testing BO move.\n"); |
| 207 | } | 217 | } |
| 208 | } | 218 | } |
| 209 | |||
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c index 1381e06d6af3..bdb46c8cadd1 100644 --- a/drivers/gpu/drm/radeon/radeon_ttm.c +++ b/drivers/gpu/drm/radeon/radeon_ttm.c | |||
| @@ -150,7 +150,7 @@ static int radeon_init_mem_type(struct ttm_bo_device *bdev, uint32_t type, | |||
| 150 | man->default_caching = TTM_PL_FLAG_CACHED; | 150 | man->default_caching = TTM_PL_FLAG_CACHED; |
| 151 | break; | 151 | break; |
| 152 | case TTM_PL_TT: | 152 | case TTM_PL_TT: |
| 153 | man->gpu_offset = 0; | 153 | man->gpu_offset = rdev->mc.gtt_location; |
| 154 | man->available_caching = TTM_PL_MASK_CACHING; | 154 | man->available_caching = TTM_PL_MASK_CACHING; |
| 155 | man->default_caching = TTM_PL_FLAG_CACHED; | 155 | man->default_caching = TTM_PL_FLAG_CACHED; |
| 156 | man->flags = TTM_MEMTYPE_FLAG_MAPPABLE | TTM_MEMTYPE_FLAG_CMA; | 156 | man->flags = TTM_MEMTYPE_FLAG_MAPPABLE | TTM_MEMTYPE_FLAG_CMA; |
| @@ -180,7 +180,7 @@ static int radeon_init_mem_type(struct ttm_bo_device *bdev, uint32_t type, | |||
| 180 | break; | 180 | break; |
| 181 | case TTM_PL_VRAM: | 181 | case TTM_PL_VRAM: |
| 182 | /* "On-card" video ram */ | 182 | /* "On-card" video ram */ |
| 183 | man->gpu_offset = 0; | 183 | man->gpu_offset = rdev->mc.vram_location; |
| 184 | man->flags = TTM_MEMTYPE_FLAG_FIXED | | 184 | man->flags = TTM_MEMTYPE_FLAG_FIXED | |
| 185 | TTM_MEMTYPE_FLAG_NEEDS_IOREMAP | | 185 | TTM_MEMTYPE_FLAG_NEEDS_IOREMAP | |
| 186 | TTM_MEMTYPE_FLAG_MAPPABLE; | 186 | TTM_MEMTYPE_FLAG_MAPPABLE; |
| @@ -482,27 +482,31 @@ int radeon_ttm_init(struct radeon_device *rdev) | |||
| 482 | DRM_ERROR("failed initializing buffer object driver(%d).\n", r); | 482 | DRM_ERROR("failed initializing buffer object driver(%d).\n", r); |
| 483 | return r; | 483 | return r; |
| 484 | } | 484 | } |
| 485 | r = ttm_bo_init_mm(&rdev->mman.bdev, TTM_PL_VRAM, 0, | 485 | r = ttm_bo_init_mm(&rdev->mman.bdev, TTM_PL_VRAM, |
| 486 | ((rdev->mc.real_vram_size) >> PAGE_SHIFT)); | 486 | 0, rdev->mc.real_vram_size >> PAGE_SHIFT); |
| 487 | if (r) { | 487 | if (r) { |
| 488 | DRM_ERROR("Failed initializing VRAM heap.\n"); | 488 | DRM_ERROR("Failed initializing VRAM heap.\n"); |
| 489 | return r; | 489 | return r; |
| 490 | } | 490 | } |
| 491 | r = radeon_object_create(rdev, NULL, 256 * 1024, true, | 491 | r = radeon_bo_create(rdev, NULL, 256 * 1024, true, |
| 492 | RADEON_GEM_DOMAIN_VRAM, false, | 492 | RADEON_GEM_DOMAIN_VRAM, |
| 493 | &rdev->stollen_vga_memory); | 493 | &rdev->stollen_vga_memory); |
| 494 | if (r) { | 494 | if (r) { |
| 495 | return r; | 495 | return r; |
| 496 | } | 496 | } |
| 497 | r = radeon_object_pin(rdev->stollen_vga_memory, RADEON_GEM_DOMAIN_VRAM, NULL); | 497 | r = radeon_bo_reserve(rdev->stollen_vga_memory, false); |
| 498 | if (r) | ||
| 499 | return r; | ||
| 500 | r = radeon_bo_pin(rdev->stollen_vga_memory, RADEON_GEM_DOMAIN_VRAM, NULL); | ||
| 501 | radeon_bo_unreserve(rdev->stollen_vga_memory); | ||
| 498 | if (r) { | 502 | if (r) { |
| 499 | radeon_object_unref(&rdev->stollen_vga_memory); | 503 | radeon_bo_unref(&rdev->stollen_vga_memory); |
| 500 | return r; | 504 | return r; |
| 501 | } | 505 | } |
| 502 | DRM_INFO("radeon: %uM of VRAM memory ready\n", | 506 | DRM_INFO("radeon: %uM of VRAM memory ready\n", |
| 503 | (unsigned)rdev->mc.real_vram_size / (1024 * 1024)); | 507 | (unsigned)rdev->mc.real_vram_size / (1024 * 1024)); |
| 504 | r = ttm_bo_init_mm(&rdev->mman.bdev, TTM_PL_TT, 0, | 508 | r = ttm_bo_init_mm(&rdev->mman.bdev, TTM_PL_TT, |
| 505 | ((rdev->mc.gtt_size) >> PAGE_SHIFT)); | 509 | 0, rdev->mc.gtt_size >> PAGE_SHIFT); |
| 506 | if (r) { | 510 | if (r) { |
| 507 | DRM_ERROR("Failed initializing GTT heap.\n"); | 511 | DRM_ERROR("Failed initializing GTT heap.\n"); |
| 508 | return r; | 512 | return r; |
| @@ -523,9 +527,15 @@ int radeon_ttm_init(struct radeon_device *rdev) | |||
| 523 | 527 | ||
| 524 | void radeon_ttm_fini(struct radeon_device *rdev) | 528 | void radeon_ttm_fini(struct radeon_device *rdev) |
| 525 | { | 529 | { |
| 530 | int r; | ||
| 531 | |||
| 526 | if (rdev->stollen_vga_memory) { | 532 | if (rdev->stollen_vga_memory) { |
| 527 | radeon_object_unpin(rdev->stollen_vga_memory); | 533 | r = radeon_bo_reserve(rdev->stollen_vga_memory, false); |
| 528 | radeon_object_unref(&rdev->stollen_vga_memory); | 534 | if (r == 0) { |
| 535 | radeon_bo_unpin(rdev->stollen_vga_memory); | ||
| 536 | radeon_bo_unreserve(rdev->stollen_vga_memory); | ||
| 537 | } | ||
| 538 | radeon_bo_unref(&rdev->stollen_vga_memory); | ||
| 529 | } | 539 | } |
| 530 | ttm_bo_clean_mm(&rdev->mman.bdev, TTM_PL_VRAM); | 540 | ttm_bo_clean_mm(&rdev->mman.bdev, TTM_PL_VRAM); |
| 531 | ttm_bo_clean_mm(&rdev->mman.bdev, TTM_PL_TT); | 541 | ttm_bo_clean_mm(&rdev->mman.bdev, TTM_PL_TT); |
diff --git a/drivers/gpu/drm/radeon/rs400.c b/drivers/gpu/drm/radeon/rs400.c index ca037160a582..eda6d757b5c4 100644 --- a/drivers/gpu/drm/radeon/rs400.c +++ b/drivers/gpu/drm/radeon/rs400.c | |||
| @@ -352,7 +352,7 @@ static int rs400_mc_init(struct radeon_device *rdev) | |||
| 352 | u32 tmp; | 352 | u32 tmp; |
| 353 | 353 | ||
| 354 | /* Setup GPU memory space */ | 354 | /* Setup GPU memory space */ |
| 355 | tmp = G_00015C_MC_FB_START(RREG32(R_00015C_NB_TOM)); | 355 | tmp = RREG32(R_00015C_NB_TOM); |
| 356 | rdev->mc.vram_location = G_00015C_MC_FB_START(tmp) << 16; | 356 | rdev->mc.vram_location = G_00015C_MC_FB_START(tmp) << 16; |
| 357 | rdev->mc.gtt_location = 0xFFFFFFFFUL; | 357 | rdev->mc.gtt_location = 0xFFFFFFFFUL; |
| 358 | r = radeon_mc_setup(rdev); | 358 | r = radeon_mc_setup(rdev); |
| @@ -387,13 +387,13 @@ static int rs400_startup(struct radeon_device *rdev) | |||
| 387 | r300_clock_startup(rdev); | 387 | r300_clock_startup(rdev); |
| 388 | /* Initialize GPU configuration (# pipes, ...) */ | 388 | /* Initialize GPU configuration (# pipes, ...) */ |
| 389 | rs400_gpu_init(rdev); | 389 | rs400_gpu_init(rdev); |
| 390 | r100_enable_bm(rdev); | ||
| 390 | /* Initialize GART (initialize after TTM so we can allocate | 391 | /* Initialize GART (initialize after TTM so we can allocate |
| 391 | * memory through TTM but finalize after TTM) */ | 392 | * memory through TTM but finalize after TTM) */ |
| 392 | r = rs400_gart_enable(rdev); | 393 | r = rs400_gart_enable(rdev); |
| 393 | if (r) | 394 | if (r) |
| 394 | return r; | 395 | return r; |
| 395 | /* Enable IRQ */ | 396 | /* Enable IRQ */ |
| 396 | rdev->irq.sw_int = true; | ||
| 397 | r100_irq_set(rdev); | 397 | r100_irq_set(rdev); |
| 398 | /* 1M ring buffer */ | 398 | /* 1M ring buffer */ |
| 399 | r = r100_cp_init(rdev, 1024 * 1024); | 399 | r = r100_cp_init(rdev, 1024 * 1024); |
| @@ -452,7 +452,7 @@ void rs400_fini(struct radeon_device *rdev) | |||
| 452 | rs400_gart_fini(rdev); | 452 | rs400_gart_fini(rdev); |
| 453 | radeon_irq_kms_fini(rdev); | 453 | radeon_irq_kms_fini(rdev); |
| 454 | radeon_fence_driver_fini(rdev); | 454 | radeon_fence_driver_fini(rdev); |
| 455 | radeon_object_fini(rdev); | 455 | radeon_bo_fini(rdev); |
| 456 | radeon_atombios_fini(rdev); | 456 | radeon_atombios_fini(rdev); |
| 457 | kfree(rdev->bios); | 457 | kfree(rdev->bios); |
| 458 | rdev->bios = NULL; | 458 | rdev->bios = NULL; |
| @@ -490,10 +490,9 @@ int rs400_init(struct radeon_device *rdev) | |||
| 490 | RREG32(R_0007C0_CP_STAT)); | 490 | RREG32(R_0007C0_CP_STAT)); |
| 491 | } | 491 | } |
| 492 | /* check if cards are posted or not */ | 492 | /* check if cards are posted or not */ |
| 493 | if (!radeon_card_posted(rdev) && rdev->bios) { | 493 | if (radeon_boot_test_post_card(rdev) == false) |
| 494 | DRM_INFO("GPU not posted. posting now...\n"); | 494 | return -EINVAL; |
| 495 | radeon_combios_asic_init(rdev->ddev); | 495 | |
| 496 | } | ||
| 497 | /* Initialize clocks */ | 496 | /* Initialize clocks */ |
| 498 | radeon_get_clock_info(rdev->ddev); | 497 | radeon_get_clock_info(rdev->ddev); |
| 499 | /* Get vram informations */ | 498 | /* Get vram informations */ |
| @@ -510,7 +509,7 @@ int rs400_init(struct radeon_device *rdev) | |||
| 510 | if (r) | 509 | if (r) |
| 511 | return r; | 510 | return r; |
| 512 | /* Memory manager */ | 511 | /* Memory manager */ |
| 513 | r = radeon_object_init(rdev); | 512 | r = radeon_bo_init(rdev); |
| 514 | if (r) | 513 | if (r) |
| 515 | return r; | 514 | return r; |
| 516 | r = rs400_gart_init(rdev); | 515 | r = rs400_gart_init(rdev); |
diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c index 5f117cd8736a..84b26376027d 100644 --- a/drivers/gpu/drm/radeon/rs600.c +++ b/drivers/gpu/drm/radeon/rs600.c | |||
| @@ -45,6 +45,21 @@ | |||
| 45 | void rs600_gpu_init(struct radeon_device *rdev); | 45 | void rs600_gpu_init(struct radeon_device *rdev); |
| 46 | int rs600_mc_wait_for_idle(struct radeon_device *rdev); | 46 | int rs600_mc_wait_for_idle(struct radeon_device *rdev); |
| 47 | 47 | ||
| 48 | int rs600_mc_init(struct radeon_device *rdev) | ||
| 49 | { | ||
| 50 | /* read back the MC value from the hw */ | ||
| 51 | int r; | ||
| 52 | u32 tmp; | ||
| 53 | |||
| 54 | /* Setup GPU memory space */ | ||
| 55 | tmp = RREG32_MC(R_000004_MC_FB_LOCATION); | ||
| 56 | rdev->mc.vram_location = G_000004_MC_FB_START(tmp) << 16; | ||
| 57 | rdev->mc.gtt_location = 0xffffffffUL; | ||
| 58 | r = radeon_mc_setup(rdev); | ||
| 59 | if (r) | ||
| 60 | return r; | ||
| 61 | return 0; | ||
| 62 | } | ||
| 48 | /* | 63 | /* |
| 49 | * GART. | 64 | * GART. |
| 50 | */ | 65 | */ |
| @@ -100,40 +115,40 @@ int rs600_gart_enable(struct radeon_device *rdev) | |||
| 100 | WREG32(R_00004C_BUS_CNTL, tmp); | 115 | WREG32(R_00004C_BUS_CNTL, tmp); |
| 101 | /* FIXME: setup default page */ | 116 | /* FIXME: setup default page */ |
| 102 | WREG32_MC(R_000100_MC_PT0_CNTL, | 117 | WREG32_MC(R_000100_MC_PT0_CNTL, |
| 103 | (S_000100_EFFECTIVE_L2_CACHE_SIZE(6) | | 118 | (S_000100_EFFECTIVE_L2_CACHE_SIZE(6) | |
| 104 | S_000100_EFFECTIVE_L2_QUEUE_SIZE(6))); | 119 | S_000100_EFFECTIVE_L2_QUEUE_SIZE(6))); |
| 120 | |||
| 105 | for (i = 0; i < 19; i++) { | 121 | for (i = 0; i < 19; i++) { |
| 106 | WREG32_MC(R_00016C_MC_PT0_CLIENT0_CNTL + i, | 122 | WREG32_MC(R_00016C_MC_PT0_CLIENT0_CNTL + i, |
| 107 | S_00016C_ENABLE_TRANSLATION_MODE_OVERRIDE(1) | | 123 | S_00016C_ENABLE_TRANSLATION_MODE_OVERRIDE(1) | |
| 108 | S_00016C_SYSTEM_ACCESS_MODE_MASK( | 124 | S_00016C_SYSTEM_ACCESS_MODE_MASK( |
| 109 | V_00016C_SYSTEM_ACCESS_MODE_IN_SYS) | | 125 | V_00016C_SYSTEM_ACCESS_MODE_NOT_IN_SYS) | |
| 110 | S_00016C_SYSTEM_APERTURE_UNMAPPED_ACCESS( | 126 | S_00016C_SYSTEM_APERTURE_UNMAPPED_ACCESS( |
| 111 | V_00016C_SYSTEM_APERTURE_UNMAPPED_DEFAULT_PAGE) | | 127 | V_00016C_SYSTEM_APERTURE_UNMAPPED_PASSTHROUGH) | |
| 112 | S_00016C_EFFECTIVE_L1_CACHE_SIZE(1) | | 128 | S_00016C_EFFECTIVE_L1_CACHE_SIZE(3) | |
| 113 | S_00016C_ENABLE_FRAGMENT_PROCESSING(1) | | 129 | S_00016C_ENABLE_FRAGMENT_PROCESSING(1) | |
| 114 | S_00016C_EFFECTIVE_L1_QUEUE_SIZE(1)); | 130 | S_00016C_EFFECTIVE_L1_QUEUE_SIZE(3)); |
| 115 | } | 131 | } |
| 116 | |||
| 117 | /* System context map to GART space */ | ||
| 118 | WREG32_MC(R_000112_MC_PT0_SYSTEM_APERTURE_LOW_ADDR, rdev->mc.gtt_start); | ||
| 119 | WREG32_MC(R_000114_MC_PT0_SYSTEM_APERTURE_HIGH_ADDR, rdev->mc.gtt_end); | ||
| 120 | |||
| 121 | /* enable first context */ | 132 | /* enable first context */ |
| 122 | WREG32_MC(R_00013C_MC_PT0_CONTEXT0_FLAT_START_ADDR, rdev->mc.gtt_start); | ||
| 123 | WREG32_MC(R_00014C_MC_PT0_CONTEXT0_FLAT_END_ADDR, rdev->mc.gtt_end); | ||
| 124 | WREG32_MC(R_000102_MC_PT0_CONTEXT0_CNTL, | 133 | WREG32_MC(R_000102_MC_PT0_CONTEXT0_CNTL, |
| 125 | S_000102_ENABLE_PAGE_TABLE(1) | | 134 | S_000102_ENABLE_PAGE_TABLE(1) | |
| 126 | S_000102_PAGE_TABLE_DEPTH(V_000102_PAGE_TABLE_FLAT)); | 135 | S_000102_PAGE_TABLE_DEPTH(V_000102_PAGE_TABLE_FLAT)); |
| 136 | |||
| 127 | /* disable all other contexts */ | 137 | /* disable all other contexts */ |
| 128 | for (i = 1; i < 8; i++) { | 138 | for (i = 1; i < 8; i++) |
| 129 | WREG32_MC(R_000102_MC_PT0_CONTEXT0_CNTL + i, 0); | 139 | WREG32_MC(R_000102_MC_PT0_CONTEXT0_CNTL + i, 0); |
| 130 | } | ||
| 131 | 140 | ||
| 132 | /* setup the page table */ | 141 | /* setup the page table */ |
| 133 | WREG32_MC(R_00012C_MC_PT0_CONTEXT0_FLAT_BASE_ADDR, | 142 | WREG32_MC(R_00012C_MC_PT0_CONTEXT0_FLAT_BASE_ADDR, |
| 134 | rdev->gart.table_addr); | 143 | rdev->gart.table_addr); |
| 144 | WREG32_MC(R_00013C_MC_PT0_CONTEXT0_FLAT_START_ADDR, rdev->mc.gtt_start); | ||
| 145 | WREG32_MC(R_00014C_MC_PT0_CONTEXT0_FLAT_END_ADDR, rdev->mc.gtt_end); | ||
| 135 | WREG32_MC(R_00011C_MC_PT0_CONTEXT0_DEFAULT_READ_ADDR, 0); | 146 | WREG32_MC(R_00011C_MC_PT0_CONTEXT0_DEFAULT_READ_ADDR, 0); |
| 136 | 147 | ||
| 148 | /* System context maps to VRAM space */ | ||
| 149 | WREG32_MC(R_000112_MC_PT0_SYSTEM_APERTURE_LOW_ADDR, rdev->mc.vram_start); | ||
| 150 | WREG32_MC(R_000114_MC_PT0_SYSTEM_APERTURE_HIGH_ADDR, rdev->mc.vram_end); | ||
| 151 | |||
| 137 | /* enable page tables */ | 152 | /* enable page tables */ |
| 138 | tmp = RREG32_MC(R_000100_MC_PT0_CNTL); | 153 | tmp = RREG32_MC(R_000100_MC_PT0_CNTL); |
| 139 | WREG32_MC(R_000100_MC_PT0_CNTL, (tmp | S_000100_ENABLE_PT(1))); | 154 | WREG32_MC(R_000100_MC_PT0_CNTL, (tmp | S_000100_ENABLE_PT(1))); |
| @@ -146,15 +161,20 @@ int rs600_gart_enable(struct radeon_device *rdev) | |||
| 146 | 161 | ||
| 147 | void rs600_gart_disable(struct radeon_device *rdev) | 162 | void rs600_gart_disable(struct radeon_device *rdev) |
| 148 | { | 163 | { |
| 149 | uint32_t tmp; | 164 | u32 tmp; |
| 165 | int r; | ||
| 150 | 166 | ||
| 151 | /* FIXME: disable out of gart access */ | 167 | /* FIXME: disable out of gart access */ |
| 152 | WREG32_MC(R_000100_MC_PT0_CNTL, 0); | 168 | WREG32_MC(R_000100_MC_PT0_CNTL, 0); |
| 153 | tmp = RREG32_MC(R_000009_MC_CNTL1); | 169 | tmp = RREG32_MC(R_000009_MC_CNTL1); |
| 154 | WREG32_MC(R_000009_MC_CNTL1, tmp & C_000009_ENABLE_PAGE_TABLES); | 170 | WREG32_MC(R_000009_MC_CNTL1, tmp & C_000009_ENABLE_PAGE_TABLES); |
| 155 | if (rdev->gart.table.vram.robj) { | 171 | if (rdev->gart.table.vram.robj) { |
| 156 | radeon_object_kunmap(rdev->gart.table.vram.robj); | 172 | r = radeon_bo_reserve(rdev->gart.table.vram.robj, false); |
| 157 | radeon_object_unpin(rdev->gart.table.vram.robj); | 173 | if (r == 0) { |
| 174 | radeon_bo_kunmap(rdev->gart.table.vram.robj); | ||
| 175 | radeon_bo_unpin(rdev->gart.table.vram.robj); | ||
| 176 | radeon_bo_unreserve(rdev->gart.table.vram.robj); | ||
| 177 | } | ||
| 158 | } | 178 | } |
| 159 | } | 179 | } |
| 160 | 180 | ||
| @@ -301,9 +321,7 @@ int rs600_mc_wait_for_idle(struct radeon_device *rdev) | |||
| 301 | 321 | ||
| 302 | void rs600_gpu_init(struct radeon_device *rdev) | 322 | void rs600_gpu_init(struct radeon_device *rdev) |
| 303 | { | 323 | { |
| 304 | /* FIXME: HDP same place on rs600 ? */ | ||
| 305 | r100_hdp_reset(rdev); | 324 | r100_hdp_reset(rdev); |
| 306 | /* FIXME: is this correct ? */ | ||
| 307 | r420_pipes_init(rdev); | 325 | r420_pipes_init(rdev); |
| 308 | /* Wait for mc idle */ | 326 | /* Wait for mc idle */ |
| 309 | if (rs600_mc_wait_for_idle(rdev)) | 327 | if (rs600_mc_wait_for_idle(rdev)) |
| @@ -312,9 +330,20 @@ void rs600_gpu_init(struct radeon_device *rdev) | |||
| 312 | 330 | ||
| 313 | void rs600_vram_info(struct radeon_device *rdev) | 331 | void rs600_vram_info(struct radeon_device *rdev) |
| 314 | { | 332 | { |
| 315 | /* FIXME: to do or is these values sane ? */ | ||
| 316 | rdev->mc.vram_is_ddr = true; | 333 | rdev->mc.vram_is_ddr = true; |
| 317 | rdev->mc.vram_width = 128; | 334 | rdev->mc.vram_width = 128; |
| 335 | |||
| 336 | rdev->mc.real_vram_size = RREG32(RADEON_CONFIG_MEMSIZE); | ||
| 337 | rdev->mc.mc_vram_size = rdev->mc.real_vram_size; | ||
| 338 | |||
| 339 | rdev->mc.aper_base = drm_get_resource_start(rdev->ddev, 0); | ||
| 340 | rdev->mc.aper_size = drm_get_resource_len(rdev->ddev, 0); | ||
| 341 | |||
| 342 | if (rdev->mc.mc_vram_size > rdev->mc.aper_size) | ||
| 343 | rdev->mc.mc_vram_size = rdev->mc.aper_size; | ||
| 344 | |||
| 345 | if (rdev->mc.real_vram_size > rdev->mc.aper_size) | ||
| 346 | rdev->mc.real_vram_size = rdev->mc.aper_size; | ||
| 318 | } | 347 | } |
| 319 | 348 | ||
| 320 | void rs600_bandwidth_update(struct radeon_device *rdev) | 349 | void rs600_bandwidth_update(struct radeon_device *rdev) |
| @@ -388,7 +417,6 @@ static int rs600_startup(struct radeon_device *rdev) | |||
| 388 | if (r) | 417 | if (r) |
| 389 | return r; | 418 | return r; |
| 390 | /* Enable IRQ */ | 419 | /* Enable IRQ */ |
| 391 | rdev->irq.sw_int = true; | ||
| 392 | rs600_irq_set(rdev); | 420 | rs600_irq_set(rdev); |
| 393 | /* 1M ring buffer */ | 421 | /* 1M ring buffer */ |
| 394 | r = r100_cp_init(rdev, 1024 * 1024); | 422 | r = r100_cp_init(rdev, 1024 * 1024); |
| @@ -445,7 +473,7 @@ void rs600_fini(struct radeon_device *rdev) | |||
| 445 | rs600_gart_fini(rdev); | 473 | rs600_gart_fini(rdev); |
| 446 | radeon_irq_kms_fini(rdev); | 474 | radeon_irq_kms_fini(rdev); |
| 447 | radeon_fence_driver_fini(rdev); | 475 | radeon_fence_driver_fini(rdev); |
| 448 | radeon_object_fini(rdev); | 476 | radeon_bo_fini(rdev); |
| 449 | radeon_atombios_fini(rdev); | 477 | radeon_atombios_fini(rdev); |
| 450 | kfree(rdev->bios); | 478 | kfree(rdev->bios); |
| 451 | rdev->bios = NULL; | 479 | rdev->bios = NULL; |
| @@ -482,10 +510,9 @@ int rs600_init(struct radeon_device *rdev) | |||
| 482 | RREG32(R_0007C0_CP_STAT)); | 510 | RREG32(R_0007C0_CP_STAT)); |
| 483 | } | 511 | } |
| 484 | /* check if cards are posted or not */ | 512 | /* check if cards are posted or not */ |
| 485 | if (!radeon_card_posted(rdev) && rdev->bios) { | 513 | if (radeon_boot_test_post_card(rdev) == false) |
| 486 | DRM_INFO("GPU not posted. posting now...\n"); | 514 | return -EINVAL; |
| 487 | atom_asic_init(rdev->mode_info.atom_context); | 515 | |
| 488 | } | ||
| 489 | /* Initialize clocks */ | 516 | /* Initialize clocks */ |
| 490 | radeon_get_clock_info(rdev->ddev); | 517 | radeon_get_clock_info(rdev->ddev); |
| 491 | /* Initialize power management */ | 518 | /* Initialize power management */ |
| @@ -493,7 +520,7 @@ int rs600_init(struct radeon_device *rdev) | |||
| 493 | /* Get vram informations */ | 520 | /* Get vram informations */ |
| 494 | rs600_vram_info(rdev); | 521 | rs600_vram_info(rdev); |
| 495 | /* Initialize memory controller (also test AGP) */ | 522 | /* Initialize memory controller (also test AGP) */ |
| 496 | r = r420_mc_init(rdev); | 523 | r = rs600_mc_init(rdev); |
| 497 | if (r) | 524 | if (r) |
| 498 | return r; | 525 | return r; |
| 499 | rs600_debugfs(rdev); | 526 | rs600_debugfs(rdev); |
| @@ -505,7 +532,7 @@ int rs600_init(struct radeon_device *rdev) | |||
| 505 | if (r) | 532 | if (r) |
| 506 | return r; | 533 | return r; |
| 507 | /* Memory manager */ | 534 | /* Memory manager */ |
| 508 | r = radeon_object_init(rdev); | 535 | r = radeon_bo_init(rdev); |
| 509 | if (r) | 536 | if (r) |
| 510 | return r; | 537 | return r; |
| 511 | r = rs600_gart_init(rdev); | 538 | r = rs600_gart_init(rdev); |
diff --git a/drivers/gpu/drm/radeon/rs690.c b/drivers/gpu/drm/radeon/rs690.c index 27547175cf93..eb486ee7ea00 100644 --- a/drivers/gpu/drm/radeon/rs690.c +++ b/drivers/gpu/drm/radeon/rs690.c | |||
| @@ -131,24 +131,25 @@ void rs690_pm_info(struct radeon_device *rdev) | |||
| 131 | 131 | ||
| 132 | void rs690_vram_info(struct radeon_device *rdev) | 132 | void rs690_vram_info(struct radeon_device *rdev) |
| 133 | { | 133 | { |
| 134 | uint32_t tmp; | ||
| 135 | fixed20_12 a; | 134 | fixed20_12 a; |
| 136 | 135 | ||
| 137 | rs400_gart_adjust_size(rdev); | 136 | rs400_gart_adjust_size(rdev); |
| 138 | /* DDR for all card after R300 & IGP */ | 137 | |
| 139 | rdev->mc.vram_is_ddr = true; | 138 | rdev->mc.vram_is_ddr = true; |
| 140 | /* FIXME: is this correct for RS690/RS740 ? */ | 139 | rdev->mc.vram_width = 128; |
| 141 | tmp = RREG32(RADEON_MEM_CNTL); | 140 | |
| 142 | if (tmp & R300_MEM_NUM_CHANNELS_MASK) { | ||
| 143 | rdev->mc.vram_width = 128; | ||
| 144 | } else { | ||
| 145 | rdev->mc.vram_width = 64; | ||
| 146 | } | ||
| 147 | rdev->mc.real_vram_size = RREG32(RADEON_CONFIG_MEMSIZE); | 141 | rdev->mc.real_vram_size = RREG32(RADEON_CONFIG_MEMSIZE); |
| 148 | rdev->mc.mc_vram_size = rdev->mc.real_vram_size; | 142 | rdev->mc.mc_vram_size = rdev->mc.real_vram_size; |
| 149 | 143 | ||
| 150 | rdev->mc.aper_base = drm_get_resource_start(rdev->ddev, 0); | 144 | rdev->mc.aper_base = drm_get_resource_start(rdev->ddev, 0); |
| 151 | rdev->mc.aper_size = drm_get_resource_len(rdev->ddev, 0); | 145 | rdev->mc.aper_size = drm_get_resource_len(rdev->ddev, 0); |
| 146 | |||
| 147 | if (rdev->mc.mc_vram_size > rdev->mc.aper_size) | ||
| 148 | rdev->mc.mc_vram_size = rdev->mc.aper_size; | ||
| 149 | |||
| 150 | if (rdev->mc.real_vram_size > rdev->mc.aper_size) | ||
| 151 | rdev->mc.real_vram_size = rdev->mc.aper_size; | ||
| 152 | |||
| 152 | rs690_pm_info(rdev); | 153 | rs690_pm_info(rdev); |
| 153 | /* FIXME: we should enforce default clock in case GPU is not in | 154 | /* FIXME: we should enforce default clock in case GPU is not in |
| 154 | * default setup | 155 | * default setup |
| @@ -161,6 +162,21 @@ void rs690_vram_info(struct radeon_device *rdev) | |||
| 161 | rdev->pm.core_bandwidth.full = rfixed_div(rdev->pm.sclk, a); | 162 | rdev->pm.core_bandwidth.full = rfixed_div(rdev->pm.sclk, a); |
| 162 | } | 163 | } |
| 163 | 164 | ||
| 165 | static int rs690_mc_init(struct radeon_device *rdev) | ||
| 166 | { | ||
| 167 | int r; | ||
| 168 | u32 tmp; | ||
| 169 | |||
| 170 | /* Setup GPU memory space */ | ||
| 171 | tmp = RREG32_MC(R_000100_MCCFG_FB_LOCATION); | ||
| 172 | rdev->mc.vram_location = G_000100_MC_FB_START(tmp) << 16; | ||
| 173 | rdev->mc.gtt_location = 0xFFFFFFFFUL; | ||
| 174 | r = radeon_mc_setup(rdev); | ||
| 175 | if (r) | ||
| 176 | return r; | ||
| 177 | return 0; | ||
| 178 | } | ||
| 179 | |||
| 164 | void rs690_line_buffer_adjust(struct radeon_device *rdev, | 180 | void rs690_line_buffer_adjust(struct radeon_device *rdev, |
| 165 | struct drm_display_mode *mode1, | 181 | struct drm_display_mode *mode1, |
| 166 | struct drm_display_mode *mode2) | 182 | struct drm_display_mode *mode2) |
| @@ -605,7 +621,6 @@ static int rs690_startup(struct radeon_device *rdev) | |||
| 605 | if (r) | 621 | if (r) |
| 606 | return r; | 622 | return r; |
| 607 | /* Enable IRQ */ | 623 | /* Enable IRQ */ |
| 608 | rdev->irq.sw_int = true; | ||
| 609 | rs600_irq_set(rdev); | 624 | rs600_irq_set(rdev); |
| 610 | /* 1M ring buffer */ | 625 | /* 1M ring buffer */ |
| 611 | r = r100_cp_init(rdev, 1024 * 1024); | 626 | r = r100_cp_init(rdev, 1024 * 1024); |
| @@ -662,7 +677,7 @@ void rs690_fini(struct radeon_device *rdev) | |||
| 662 | rs400_gart_fini(rdev); | 677 | rs400_gart_fini(rdev); |
| 663 | radeon_irq_kms_fini(rdev); | 678 | radeon_irq_kms_fini(rdev); |
| 664 | radeon_fence_driver_fini(rdev); | 679 | radeon_fence_driver_fini(rdev); |
| 665 | radeon_object_fini(rdev); | 680 | radeon_bo_fini(rdev); |
| 666 | radeon_atombios_fini(rdev); | 681 | radeon_atombios_fini(rdev); |
| 667 | kfree(rdev->bios); | 682 | kfree(rdev->bios); |
| 668 | rdev->bios = NULL; | 683 | rdev->bios = NULL; |
| @@ -700,10 +715,9 @@ int rs690_init(struct radeon_device *rdev) | |||
| 700 | RREG32(R_0007C0_CP_STAT)); | 715 | RREG32(R_0007C0_CP_STAT)); |
| 701 | } | 716 | } |
| 702 | /* check if cards are posted or not */ | 717 | /* check if cards are posted or not */ |
| 703 | if (!radeon_card_posted(rdev) && rdev->bios) { | 718 | if (radeon_boot_test_post_card(rdev) == false) |
| 704 | DRM_INFO("GPU not posted. posting now...\n"); | 719 | return -EINVAL; |
| 705 | atom_asic_init(rdev->mode_info.atom_context); | 720 | |
| 706 | } | ||
| 707 | /* Initialize clocks */ | 721 | /* Initialize clocks */ |
| 708 | radeon_get_clock_info(rdev->ddev); | 722 | radeon_get_clock_info(rdev->ddev); |
| 709 | /* Initialize power management */ | 723 | /* Initialize power management */ |
| @@ -711,7 +725,7 @@ int rs690_init(struct radeon_device *rdev) | |||
| 711 | /* Get vram informations */ | 725 | /* Get vram informations */ |
| 712 | rs690_vram_info(rdev); | 726 | rs690_vram_info(rdev); |
| 713 | /* Initialize memory controller (also test AGP) */ | 727 | /* Initialize memory controller (also test AGP) */ |
| 714 | r = r420_mc_init(rdev); | 728 | r = rs690_mc_init(rdev); |
| 715 | if (r) | 729 | if (r) |
| 716 | return r; | 730 | return r; |
| 717 | rv515_debugfs(rdev); | 731 | rv515_debugfs(rdev); |
| @@ -723,7 +737,7 @@ int rs690_init(struct radeon_device *rdev) | |||
| 723 | if (r) | 737 | if (r) |
| 724 | return r; | 738 | return r; |
| 725 | /* Memory manager */ | 739 | /* Memory manager */ |
| 726 | r = radeon_object_init(rdev); | 740 | r = radeon_bo_init(rdev); |
| 727 | if (r) | 741 | if (r) |
| 728 | return r; | 742 | return r; |
| 729 | r = rs400_gart_init(rdev); | 743 | r = rs400_gart_init(rdev); |
diff --git a/drivers/gpu/drm/radeon/rv515.c b/drivers/gpu/drm/radeon/rv515.c index ba68c9fe90a1..7793239e24b2 100644 --- a/drivers/gpu/drm/radeon/rv515.c +++ b/drivers/gpu/drm/radeon/rv515.c | |||
| @@ -478,7 +478,6 @@ static int rv515_startup(struct radeon_device *rdev) | |||
| 478 | return r; | 478 | return r; |
| 479 | } | 479 | } |
| 480 | /* Enable IRQ */ | 480 | /* Enable IRQ */ |
| 481 | rdev->irq.sw_int = true; | ||
| 482 | rs600_irq_set(rdev); | 481 | rs600_irq_set(rdev); |
| 483 | /* 1M ring buffer */ | 482 | /* 1M ring buffer */ |
| 484 | r = r100_cp_init(rdev, 1024 * 1024); | 483 | r = r100_cp_init(rdev, 1024 * 1024); |
| @@ -540,11 +539,11 @@ void rv515_fini(struct radeon_device *rdev) | |||
| 540 | r100_wb_fini(rdev); | 539 | r100_wb_fini(rdev); |
| 541 | r100_ib_fini(rdev); | 540 | r100_ib_fini(rdev); |
| 542 | radeon_gem_fini(rdev); | 541 | radeon_gem_fini(rdev); |
| 543 | rv370_pcie_gart_fini(rdev); | 542 | rv370_pcie_gart_fini(rdev); |
| 544 | radeon_agp_fini(rdev); | 543 | radeon_agp_fini(rdev); |
| 545 | radeon_irq_kms_fini(rdev); | 544 | radeon_irq_kms_fini(rdev); |
| 546 | radeon_fence_driver_fini(rdev); | 545 | radeon_fence_driver_fini(rdev); |
| 547 | radeon_object_fini(rdev); | 546 | radeon_bo_fini(rdev); |
| 548 | radeon_atombios_fini(rdev); | 547 | radeon_atombios_fini(rdev); |
| 549 | kfree(rdev->bios); | 548 | kfree(rdev->bios); |
| 550 | rdev->bios = NULL; | 549 | rdev->bios = NULL; |
| @@ -580,10 +579,8 @@ int rv515_init(struct radeon_device *rdev) | |||
| 580 | RREG32(R_0007C0_CP_STAT)); | 579 | RREG32(R_0007C0_CP_STAT)); |
| 581 | } | 580 | } |
| 582 | /* check if cards are posted or not */ | 581 | /* check if cards are posted or not */ |
| 583 | if (!radeon_card_posted(rdev) && rdev->bios) { | 582 | if (radeon_boot_test_post_card(rdev) == false) |
| 584 | DRM_INFO("GPU not posted. posting now...\n"); | 583 | return -EINVAL; |
| 585 | atom_asic_init(rdev->mode_info.atom_context); | ||
| 586 | } | ||
| 587 | /* Initialize clocks */ | 584 | /* Initialize clocks */ |
| 588 | radeon_get_clock_info(rdev->ddev); | 585 | radeon_get_clock_info(rdev->ddev); |
| 589 | /* Initialize power management */ | 586 | /* Initialize power management */ |
| @@ -603,7 +600,7 @@ int rv515_init(struct radeon_device *rdev) | |||
| 603 | if (r) | 600 | if (r) |
| 604 | return r; | 601 | return r; |
| 605 | /* Memory manager */ | 602 | /* Memory manager */ |
| 606 | r = radeon_object_init(rdev); | 603 | r = radeon_bo_init(rdev); |
| 607 | if (r) | 604 | if (r) |
| 608 | return r; | 605 | return r; |
| 609 | r = rv370_pcie_gart_init(rdev); | 606 | r = rv370_pcie_gart_init(rdev); |
diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c index b0efd0ddae7a..dd4f02096a80 100644 --- a/drivers/gpu/drm/radeon/rv770.c +++ b/drivers/gpu/drm/radeon/rv770.c | |||
| @@ -92,7 +92,7 @@ int rv770_pcie_gart_enable(struct radeon_device *rdev) | |||
| 92 | void rv770_pcie_gart_disable(struct radeon_device *rdev) | 92 | void rv770_pcie_gart_disable(struct radeon_device *rdev) |
| 93 | { | 93 | { |
| 94 | u32 tmp; | 94 | u32 tmp; |
| 95 | int i; | 95 | int i, r; |
| 96 | 96 | ||
| 97 | /* Disable all tables */ | 97 | /* Disable all tables */ |
| 98 | for (i = 0; i < 7; i++) | 98 | for (i = 0; i < 7; i++) |
| @@ -113,8 +113,12 @@ void rv770_pcie_gart_disable(struct radeon_device *rdev) | |||
| 113 | WREG32(MC_VM_MB_L1_TLB2_CNTL, tmp); | 113 | WREG32(MC_VM_MB_L1_TLB2_CNTL, tmp); |
| 114 | WREG32(MC_VM_MB_L1_TLB3_CNTL, tmp); | 114 | WREG32(MC_VM_MB_L1_TLB3_CNTL, tmp); |
| 115 | if (rdev->gart.table.vram.robj) { | 115 | if (rdev->gart.table.vram.robj) { |
| 116 | radeon_object_kunmap(rdev->gart.table.vram.robj); | 116 | r = radeon_bo_reserve(rdev->gart.table.vram.robj, false); |
| 117 | radeon_object_unpin(rdev->gart.table.vram.robj); | 117 | if (likely(r == 0)) { |
| 118 | radeon_bo_kunmap(rdev->gart.table.vram.robj); | ||
| 119 | radeon_bo_unpin(rdev->gart.table.vram.robj); | ||
| 120 | radeon_bo_unreserve(rdev->gart.table.vram.robj); | ||
| 121 | } | ||
| 118 | } | 122 | } |
| 119 | } | 123 | } |
| 120 | 124 | ||
| @@ -880,13 +884,26 @@ static int rv770_startup(struct radeon_device *rdev) | |||
| 880 | } | 884 | } |
| 881 | rv770_gpu_init(rdev); | 885 | rv770_gpu_init(rdev); |
| 882 | 886 | ||
| 883 | r = radeon_object_pin(rdev->r600_blit.shader_obj, RADEON_GEM_DOMAIN_VRAM, | 887 | r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false); |
| 884 | &rdev->r600_blit.shader_gpu_addr); | 888 | if (unlikely(r != 0)) |
| 889 | return r; | ||
| 890 | r = radeon_bo_pin(rdev->r600_blit.shader_obj, RADEON_GEM_DOMAIN_VRAM, | ||
| 891 | &rdev->r600_blit.shader_gpu_addr); | ||
| 892 | radeon_bo_unreserve(rdev->r600_blit.shader_obj); | ||
| 885 | if (r) { | 893 | if (r) { |
| 886 | DRM_ERROR("failed to pin blit object %d\n", r); | 894 | DRM_ERROR("failed to pin blit object %d\n", r); |
| 887 | return r; | 895 | return r; |
| 888 | } | 896 | } |
| 889 | 897 | ||
| 898 | /* Enable IRQ */ | ||
| 899 | r = r600_irq_init(rdev); | ||
| 900 | if (r) { | ||
| 901 | DRM_ERROR("radeon: IH init failed (%d).\n", r); | ||
| 902 | radeon_irq_kms_fini(rdev); | ||
| 903 | return r; | ||
| 904 | } | ||
| 905 | r600_irq_set(rdev); | ||
| 906 | |||
| 890 | r = radeon_ring_init(rdev, rdev->cp.ring_size); | 907 | r = radeon_ring_init(rdev, rdev->cp.ring_size); |
| 891 | if (r) | 908 | if (r) |
| 892 | return r; | 909 | return r; |
| @@ -934,13 +951,19 @@ int rv770_resume(struct radeon_device *rdev) | |||
| 934 | 951 | ||
| 935 | int rv770_suspend(struct radeon_device *rdev) | 952 | int rv770_suspend(struct radeon_device *rdev) |
| 936 | { | 953 | { |
| 954 | int r; | ||
| 955 | |||
| 937 | /* FIXME: we should wait for ring to be empty */ | 956 | /* FIXME: we should wait for ring to be empty */ |
| 938 | r700_cp_stop(rdev); | 957 | r700_cp_stop(rdev); |
| 939 | rdev->cp.ready = false; | 958 | rdev->cp.ready = false; |
| 940 | r600_wb_disable(rdev); | 959 | r600_wb_disable(rdev); |
| 941 | rv770_pcie_gart_disable(rdev); | 960 | rv770_pcie_gart_disable(rdev); |
| 942 | /* unpin shaders bo */ | 961 | /* unpin shaders bo */ |
| 943 | radeon_object_unpin(rdev->r600_blit.shader_obj); | 962 | r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false); |
| 963 | if (likely(r == 0)) { | ||
| 964 | radeon_bo_unpin(rdev->r600_blit.shader_obj); | ||
| 965 | radeon_bo_unreserve(rdev->r600_blit.shader_obj); | ||
| 966 | } | ||
| 944 | return 0; | 967 | return 0; |
| 945 | } | 968 | } |
| 946 | 969 | ||
| @@ -975,7 +998,11 @@ int rv770_init(struct radeon_device *rdev) | |||
| 975 | if (r) | 998 | if (r) |
| 976 | return r; | 999 | return r; |
| 977 | /* Post card if necessary */ | 1000 | /* Post card if necessary */ |
| 978 | if (!r600_card_posted(rdev) && rdev->bios) { | 1001 | if (!r600_card_posted(rdev)) { |
| 1002 | if (!rdev->bios) { | ||
| 1003 | dev_err(rdev->dev, "Card not posted and no BIOS - ignoring\n"); | ||
| 1004 | return -EINVAL; | ||
| 1005 | } | ||
| 979 | DRM_INFO("GPU not posted. posting now...\n"); | 1006 | DRM_INFO("GPU not posted. posting now...\n"); |
| 980 | atom_asic_init(rdev->mode_info.atom_context); | 1007 | atom_asic_init(rdev->mode_info.atom_context); |
| 981 | } | 1008 | } |
| @@ -998,14 +1025,22 @@ int rv770_init(struct radeon_device *rdev) | |||
| 998 | if (r) | 1025 | if (r) |
| 999 | return r; | 1026 | return r; |
| 1000 | /* Memory manager */ | 1027 | /* Memory manager */ |
| 1001 | r = radeon_object_init(rdev); | 1028 | r = radeon_bo_init(rdev); |
| 1029 | if (r) | ||
| 1030 | return r; | ||
| 1031 | |||
| 1032 | r = radeon_irq_kms_init(rdev); | ||
| 1002 | if (r) | 1033 | if (r) |
| 1003 | return r; | 1034 | return r; |
| 1035 | |||
| 1004 | rdev->cp.ring_obj = NULL; | 1036 | rdev->cp.ring_obj = NULL; |
| 1005 | r600_ring_init(rdev, 1024 * 1024); | 1037 | r600_ring_init(rdev, 1024 * 1024); |
| 1006 | 1038 | ||
| 1007 | if (!rdev->me_fw || !rdev->pfp_fw) { | 1039 | rdev->ih.ring_obj = NULL; |
| 1008 | r = r600_cp_init_microcode(rdev); | 1040 | r600_ih_ring_init(rdev, 64 * 1024); |
| 1041 | |||
| 1042 | if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) { | ||
| 1043 | r = r600_init_microcode(rdev); | ||
| 1009 | if (r) { | 1044 | if (r) { |
| 1010 | DRM_ERROR("Failed to load firmware!\n"); | 1045 | DRM_ERROR("Failed to load firmware!\n"); |
| 1011 | return r; | 1046 | return r; |
| @@ -1051,6 +1086,8 @@ void rv770_fini(struct radeon_device *rdev) | |||
| 1051 | rv770_suspend(rdev); | 1086 | rv770_suspend(rdev); |
| 1052 | 1087 | ||
| 1053 | r600_blit_fini(rdev); | 1088 | r600_blit_fini(rdev); |
| 1089 | r600_irq_fini(rdev); | ||
| 1090 | radeon_irq_kms_fini(rdev); | ||
| 1054 | radeon_ring_fini(rdev); | 1091 | radeon_ring_fini(rdev); |
| 1055 | r600_wb_fini(rdev); | 1092 | r600_wb_fini(rdev); |
| 1056 | rv770_pcie_gart_fini(rdev); | 1093 | rv770_pcie_gart_fini(rdev); |
| @@ -1059,7 +1096,7 @@ void rv770_fini(struct radeon_device *rdev) | |||
| 1059 | radeon_clocks_fini(rdev); | 1096 | radeon_clocks_fini(rdev); |
| 1060 | if (rdev->flags & RADEON_IS_AGP) | 1097 | if (rdev->flags & RADEON_IS_AGP) |
| 1061 | radeon_agp_fini(rdev); | 1098 | radeon_agp_fini(rdev); |
| 1062 | radeon_object_fini(rdev); | 1099 | radeon_bo_fini(rdev); |
| 1063 | radeon_atombios_fini(rdev); | 1100 | radeon_atombios_fini(rdev); |
| 1064 | kfree(rdev->bios); | 1101 | kfree(rdev->bios); |
| 1065 | rdev->bios = NULL; | 1102 | rdev->bios = NULL; |
