author     Dave Airlie <airlied@redhat.com>   2010-02-24 22:44:04 -0500
committer  Dave Airlie <airlied@redhat.com>   2010-02-26 01:23:23 -0500
commit     eb6b6d7cdd5548fa03a919d14615195600013be2 (patch)
tree       c499472ef28a903f94b09da9a24f66b021412813 /drivers/gpu/drm/radeon/r600.c
parent     30d6c72c4a760cfc9069ee100786e4d6cf6de59d (diff)
parent     383be5d1789d9a7a2e77dca1cb0aca89507d069e (diff)
Merge remote branch 'korg/drm-radeon-testing' into drm-next-stage

* korg/drm-radeon-testing: (62 commits)
  drm/radeon/kms: update new pll algo
  drm/radeon/kms: add support for square microtiles on r3xx-r5xx
  drm/radeon/kms: force pinning buffer into visible VRAM
  drm/radeon/kms/evergreen: fix typo in cursor code
  drm/radeon/kms: implement reading active PCIE lanes on R600+
  drm/radeon/kms: for downclocking non-mobility check PERFORMANCE state
  drm/radeon/kms: simplify storing current and requested PM mode
  drm/radeon: fixes for r6xx/r7xx gfx init
  drm/radeon/rv740: fix backend setup
  drm/radeon/kms: fix R3XX/R4XX memory controller initialization
  [rfc] drm/radeon/kms: pm debugging check for vbl.
  drm/radeon: Fix memory allocation failures in the preKMS command stream checking.
  drm: Add generic multipart buffer.
  drm/radeon/kms: simplify memory controller setup V2
  drm/radeon: Add asic hook for dma copy to r200 cards.
  drm/radeon/kms: Create asic structure for r300 pcie cards.
  drm/radeon/kms: remove unused r600_gart_clear_page
  drm/radeon/kms: remove HDP flushes from fence emit (v2)
  drm/radeon/kms: add LVDS pll quirk for Dell Studio 15
  drm/radeon/kms: simplify picking power state
  ...

Conflicts:
        drivers/gpu/drm/radeon/atom.c
        drivers/gpu/drm/radeon/atombios.h
        drivers/gpu/drm/radeon/atombios_dp.c
        drivers/gpu/drm/radeon/r600.c
        drivers/gpu/drm/radeon/r600_audio.c
        drivers/gpu/drm/radeon/r600_cp.c
        drivers/gpu/drm/radeon/radeon.h
        drivers/gpu/drm/radeon/radeon_connectors.c
        drivers/gpu/drm/radeon/radeon_ring.c
        drivers/gpu/drm/radeon/rv770.c
Diffstat (limited to 'drivers/gpu/drm/radeon/r600.c')
-rw-r--r--   drivers/gpu/drm/radeon/r600.c   190
1 file changed, 106 insertions(+), 84 deletions(-)
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index 2ffcf5a03551..c52290197292 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -353,23 +353,14 @@ void r600_hpd_fini(struct radeon_device *rdev)
 /*
  * R600 PCIE GART
  */
-int r600_gart_clear_page(struct radeon_device *rdev, int i)
-{
-        void __iomem *ptr = (void *)rdev->gart.table.vram.ptr;
-        u64 pte;
-
-        if (i < 0 || i > rdev->gart.num_gpu_pages)
-                return -EINVAL;
-        pte = 0;
-        writeq(pte, ((void __iomem *)ptr) + (i * 8));
-        return 0;
-}
-
 void r600_pcie_gart_tlb_flush(struct radeon_device *rdev)
 {
         unsigned i;
         u32 tmp;
 
+        /* flush hdp cache so updates hit vram */
+        WREG32(R_005480_HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1);
+
         WREG32(VM_CONTEXT0_INVALIDATION_LOW_ADDR, rdev->mc.gtt_start >> 12);
         WREG32(VM_CONTEXT0_INVALIDATION_HIGH_ADDR, (rdev->mc.gtt_end - 1) >> 12);
         WREG32(VM_CONTEXT0_REQUEST_RESPONSE, REQUEST_TYPE(1));
@@ -416,6 +407,7 @@ int r600_pcie_gart_enable(struct radeon_device *rdev)
         r = radeon_gart_table_vram_pin(rdev);
         if (r)
                 return r;
+        radeon_gart_restore(rdev);
 
         /* Setup L2 cache */
         WREG32(VM_L2_CNTL, ENABLE_L2_CACHE | ENABLE_L2_FRAGMENT_PROCESSING |
@@ -619,6 +611,68 @@ static void r600_mc_program(struct radeon_device *rdev)
         rv515_vga_render_disable(rdev);
 }
 
+/**
+ * r600_vram_gtt_location - try to find VRAM & GTT location
+ * @rdev: radeon device structure holding all necessary informations
+ * @mc: memory controller structure holding memory informations
+ *
+ * Function will place try to place VRAM at same place as in CPU (PCI)
+ * address space as some GPU seems to have issue when we reprogram at
+ * different address space.
+ *
+ * If there is not enough space to fit the unvisible VRAM after the
+ * aperture then we limit the VRAM size to the aperture.
+ *
+ * If we are using AGP then place VRAM adjacent to AGP aperture are we need
+ * them to be in one from GPU point of view so that we can program GPU to
+ * catch access outside them (weird GPU policy see ??).
+ *
+ * This function will never fails, worst case are limiting VRAM or GTT.
+ *
+ * Note: GTT start, end, size should be initialized before calling this
+ * function on AGP platform.
+ */
+void r600_vram_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc)
+{
+        u64 size_bf, size_af;
+
+        if (mc->mc_vram_size > 0xE0000000) {
+                /* leave room for at least 512M GTT */
+                dev_warn(rdev->dev, "limiting VRAM\n");
+                mc->real_vram_size = 0xE0000000;
+                mc->mc_vram_size = 0xE0000000;
+        }
+        if (rdev->flags & RADEON_IS_AGP) {
+                size_bf = mc->gtt_start;
+                size_af = 0xFFFFFFFF - mc->gtt_end + 1;
+                if (size_bf > size_af) {
+                        if (mc->mc_vram_size > size_bf) {
+                                dev_warn(rdev->dev, "limiting VRAM\n");
+                                mc->real_vram_size = size_bf;
+                                mc->mc_vram_size = size_bf;
+                        }
+                        mc->vram_start = mc->gtt_start - mc->mc_vram_size;
+                } else {
+                        if (mc->mc_vram_size > size_af) {
+                                dev_warn(rdev->dev, "limiting VRAM\n");
+                                mc->real_vram_size = size_af;
+                                mc->mc_vram_size = size_af;
+                        }
+                        mc->vram_start = mc->gtt_end;
+                }
+                mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
+                dev_info(rdev->dev, "VRAM: %lluM 0x%08llX - 0x%08llX (%lluM used)\n",
+                                mc->mc_vram_size >> 20, mc->vram_start,
+                                mc->vram_end, mc->real_vram_size >> 20);
+        } else {
+                u64 base = 0;
+                if (rdev->flags & RADEON_IS_IGP)
+                        base = (RREG32(MC_VM_FB_LOCATION) & 0xFFFF) << 24;
+                radeon_vram_location(rdev, &rdev->mc, base);
+                radeon_gtt_location(rdev, mc);
+        }
+}
+
 int r600_mc_init(struct radeon_device *rdev)
 {
         fixed20_12 a;
@@ -658,75 +712,21 @@ int r600_mc_init(struct radeon_device *rdev)
         /* Setup GPU memory space */
         rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE);
         rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE);
-
-        if (rdev->mc.mc_vram_size > rdev->mc.aper_size)
+        rdev->mc.visible_vram_size = rdev->mc.aper_size;
+        /* FIXME remove this once we support unmappable VRAM */
+        if (rdev->mc.mc_vram_size > rdev->mc.aper_size) {
                 rdev->mc.mc_vram_size = rdev->mc.aper_size;
-
-        if (rdev->mc.real_vram_size > rdev->mc.aper_size)
                 rdev->mc.real_vram_size = rdev->mc.aper_size;
-
-        if (rdev->flags & RADEON_IS_AGP) {
-                /* gtt_size is setup by radeon_agp_init */
-                rdev->mc.gtt_location = rdev->mc.agp_base;
-                tmp = 0xFFFFFFFFUL - rdev->mc.agp_base - rdev->mc.gtt_size;
-                /* Try to put vram before or after AGP because we
-                 * we want SYSTEM_APERTURE to cover both VRAM and
-                 * AGP so that GPU can catch out of VRAM/AGP access
-                 */
-                if (rdev->mc.gtt_location > rdev->mc.mc_vram_size) {
-                        /* Enough place before */
-                        rdev->mc.vram_location = rdev->mc.gtt_location -
-                                                rdev->mc.mc_vram_size;
-                } else if (tmp > rdev->mc.mc_vram_size) {
-                        /* Enough place after */
-                        rdev->mc.vram_location = rdev->mc.gtt_location +
-                                                rdev->mc.gtt_size;
-                } else {
-                        /* Try to setup VRAM then AGP might not
-                         * not work on some card
-                         */
-                        rdev->mc.vram_location = 0x00000000UL;
-                        rdev->mc.gtt_location = rdev->mc.mc_vram_size;
-                }
-        } else {
-                rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024;
-                rdev->mc.vram_location = (RREG32(MC_VM_FB_LOCATION) &
-                                        0xFFFF) << 24;
-                tmp = rdev->mc.vram_location + rdev->mc.mc_vram_size;
-                if ((0xFFFFFFFFUL - tmp) >= rdev->mc.gtt_size) {
-                        /* Enough place after vram */
-                        rdev->mc.gtt_location = tmp;
-                } else if (rdev->mc.vram_location >= rdev->mc.gtt_size) {
-                        /* Enough place before vram */
-                        rdev->mc.gtt_location = 0;
-                } else {
-                        /* Not enough place after or before shrink
-                         * gart size
-                         */
-                        if (rdev->mc.vram_location > (0xFFFFFFFFUL - tmp)) {
-                                rdev->mc.gtt_location = 0;
-                                rdev->mc.gtt_size = rdev->mc.vram_location;
-                        } else {
-                                rdev->mc.gtt_location = tmp;
-                                rdev->mc.gtt_size = 0xFFFFFFFFUL - tmp;
-                        }
-                }
-                rdev->mc.gtt_location = rdev->mc.mc_vram_size;
         }
-        rdev->mc.vram_start = rdev->mc.vram_location;
-        rdev->mc.vram_end = rdev->mc.vram_location + rdev->mc.mc_vram_size - 1;
-        rdev->mc.gtt_start = rdev->mc.gtt_location;
-        rdev->mc.gtt_end = rdev->mc.gtt_location + rdev->mc.gtt_size - 1;
+        r600_vram_gtt_location(rdev, &rdev->mc);
         /* FIXME: we should enforce default clock in case GPU is not in
          * default setup
          */
         a.full = rfixed_const(100);
         rdev->pm.sclk.full = rfixed_const(rdev->clock.default_sclk);
         rdev->pm.sclk.full = rfixed_div(rdev->pm.sclk, a);
-
         if (rdev->flags & RADEON_IS_IGP)
                 rdev->mc.igp_sideport_enabled = radeon_atombios_sideport_present(rdev);
-
         return 0;
 }
 
@@ -981,6 +981,9 @@ void r600_gpu_init(struct radeon_device *rdev)
 {
         u32 tiling_config;
         u32 ramcfg;
+        u32 backend_map;
+        u32 cc_rb_backend_disable;
+        u32 cc_gc_shader_pipe_config;
         u32 tmp;
         int i, j;
         u32 sq_config;
@@ -1090,8 +1093,11 @@ void r600_gpu_init(struct radeon_device *rdev)
         default:
                 break;
         }
+        rdev->config.r600.tiling_npipes = rdev->config.r600.max_tile_pipes;
+        rdev->config.r600.tiling_nbanks = 4 << ((ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT);
         tiling_config |= BANK_TILING((ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT);
         tiling_config |= GROUP_SIZE(0);
+        rdev->config.r600.tiling_group_size = 256;
         tmp = (ramcfg & NOOFROWS_MASK) >> NOOFROWS_SHIFT;
         if (tmp > 3) {
                 tiling_config |= ROW_TILING(3);
@@ -1101,24 +1107,33 @@ void r600_gpu_init(struct radeon_device *rdev)
                 tiling_config |= SAMPLE_SPLIT(tmp);
         }
         tiling_config |= BANK_SWAPS(1);
-        tmp = r600_get_tile_pipe_to_backend_map(rdev->config.r600.max_tile_pipes,
-                                                rdev->config.r600.max_backends,
-                                                (0xff << rdev->config.r600.max_backends) & 0xff);
-        tiling_config |= BACKEND_MAP(tmp);
+
+        cc_rb_backend_disable = RREG32(CC_RB_BACKEND_DISABLE) & 0x00ff0000;
+        cc_rb_backend_disable |=
+                BACKEND_DISABLE((R6XX_MAX_BACKENDS_MASK << rdev->config.r600.max_backends) & R6XX_MAX_BACKENDS_MASK);
+
+        cc_gc_shader_pipe_config = RREG32(CC_GC_SHADER_PIPE_CONFIG) & 0xffffff00;
+        cc_gc_shader_pipe_config |=
+                INACTIVE_QD_PIPES((R6XX_MAX_PIPES_MASK << rdev->config.r600.max_pipes) & R6XX_MAX_PIPES_MASK);
+        cc_gc_shader_pipe_config |=
+                INACTIVE_SIMDS((R6XX_MAX_SIMDS_MASK << rdev->config.r600.max_simds) & R6XX_MAX_SIMDS_MASK);
+
+        backend_map = r600_get_tile_pipe_to_backend_map(rdev->config.r600.max_tile_pipes,
+                                                        (R6XX_MAX_BACKENDS -
+                                                         r600_count_pipe_bits((cc_rb_backend_disable &
+                                                                               R6XX_MAX_BACKENDS_MASK) >> 16)),
+                                                        (cc_rb_backend_disable >> 16));
+
+        tiling_config |= BACKEND_MAP(backend_map);
         WREG32(GB_TILING_CONFIG, tiling_config);
         WREG32(DCP_TILING_CONFIG, tiling_config & 0xffff);
         WREG32(HDP_TILING_CONFIG, tiling_config & 0xffff);
 
-        tmp = BACKEND_DISABLE((R6XX_MAX_BACKENDS_MASK << rdev->config.r600.max_backends) & R6XX_MAX_BACKENDS_MASK);
-        WREG32(CC_RB_BACKEND_DISABLE, tmp);
-
         /* Setup pipes */
-        tmp = INACTIVE_QD_PIPES((R6XX_MAX_PIPES_MASK << rdev->config.r600.max_pipes) & R6XX_MAX_PIPES_MASK);
-        tmp |= INACTIVE_SIMDS((R6XX_MAX_SIMDS_MASK << rdev->config.r600.max_simds) & R6XX_MAX_SIMDS_MASK);
-        WREG32(CC_GC_SHADER_PIPE_CONFIG, tmp);
-        WREG32(GC_USER_SHADER_PIPE_CONFIG, tmp);
+        WREG32(CC_RB_BACKEND_DISABLE, cc_rb_backend_disable);
+        WREG32(CC_GC_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config);
 
-        tmp = R6XX_MAX_BACKENDS - r600_count_pipe_bits(tmp & INACTIVE_QD_PIPES_MASK);
+        tmp = R6XX_MAX_PIPES - r600_count_pipe_bits((cc_gc_shader_pipe_config & INACTIVE_QD_PIPES_MASK) >> 8);
         WREG32(VGT_OUT_DEALLOC_CNTL, (tmp * 4) & DEALLOC_DIST_MASK);
         WREG32(VGT_VERTEX_REUSE_BLOCK_CNTL, ((tmp * 4) - 2) & VTX_REUSE_DEPTH_MASK);
 
@@ -1783,12 +1798,17 @@ void r600_fence_ring_emit(struct radeon_device *rdev,
                           struct radeon_fence *fence)
 {
         /* Also consider EVENT_WRITE_EOP. it handles the interrupts + timestamps + events */
+
+        radeon_ring_write(rdev, PACKET3(PACKET3_EVENT_WRITE, 0));
+        radeon_ring_write(rdev, CACHE_FLUSH_AND_INV_EVENT);
+        /* wait for 3D idle clean */
+        radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 1));
+        radeon_ring_write(rdev, (WAIT_UNTIL - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);
+        radeon_ring_write(rdev, WAIT_3D_IDLE_bit | WAIT_3D_IDLECLEAN_bit);
         /* Emit fence sequence & fire IRQ */
         radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 1));
         radeon_ring_write(rdev, ((rdev->fence_drv.scratch_reg - PACKET3_SET_CONFIG_REG_OFFSET) >> 2));
         radeon_ring_write(rdev, fence->seq);
-        radeon_ring_write(rdev, PACKET0(R_005480_HDP_MEM_COHERENCY_FLUSH_CNTL, 0));
-        radeon_ring_write(rdev, 1);
         /* CP_INTERRUPT packet 3 no longer exists, use packet 0 */
         radeon_ring_write(rdev, PACKET0(CP_INT_STATUS, 0));
         radeon_ring_write(rdev, RB_INT_STAT);
@@ -2745,6 +2765,7 @@ restart_ih:
                 case 0: /* D1 vblank */
                         if (disp_int & LB_D1_VBLANK_INTERRUPT) {
                                 drm_handle_vblank(rdev->ddev, 0);
+                                wake_up(&rdev->irq.vblank_queue);
                                 disp_int &= ~LB_D1_VBLANK_INTERRUPT;
                                 DRM_DEBUG("IH: D1 vblank\n");
                         }
@@ -2765,6 +2786,7 @@ restart_ih:
                 case 0: /* D2 vblank */
                         if (disp_int & LB_D2_VBLANK_INTERRUPT) {
                                 drm_handle_vblank(rdev->ddev, 1);
+                                wake_up(&rdev->irq.vblank_queue);
                                 disp_int &= ~LB_D2_VBLANK_INTERRUPT;
                                 DRM_DEBUG("IH: D2 vblank\n");
                         }