author	Alex Deucher <alexdeucher@gmail.com>	2010-03-24 13:20:41 -0400
committer	Dave Airlie <airlied@redhat.com>	2010-04-08 20:15:32 -0400
commit	0fcdb61e78050f8f0b31029eeafa5ae013ce0f35 (patch)
tree	7f710199fa2a2b6f7e2c96bf90102645f9cf406a	/drivers/gpu/drm/radeon/evergreen.c
parent	49f6598277635af13d60e7d2601963356bc48bd8 (diff)
drm/radeon/kms/evergreen: add gart support
Gart setup is more or less like r7xx.  Copy rv770d.h to evergreend.h
and fix up changes.

Signed-off-by: Alex Deucher <alexdeucher@gmail.com>
Signed-off-by: Dave Airlie <airlied@redhat.com>
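For readers skimming the hunks below: the heart of the change is a new TLB flush helper plus explicit writes to the two VM context registers (VM_CONTEXT0_CNTL/VM_CONTEXT1_CNTL) in place of the old r6xx/r7xx-style loop over seven contexts. Here is a minimal annotated sketch of the flush's request/poll pattern, using the register and field names the patch pulls in from evergreend.h; the reading of the response field (2 = request failed, any other non-zero value = flush complete) is inferred from the code in the diff and is an assumption, not documented hardware behaviour.

/* Annotated sketch of the request/poll TLB flush added by this patch.
 * Not a drop-in replacement; see evergreen_pcie_gart_tlb_flush() in the
 * diff below for the actual driver code. */
static void sketch_gart_tlb_flush(struct radeon_device *rdev)
{
	unsigned i;
	u32 resp;

	/* ask the memory controller to flush the VM context 0 TLB */
	WREG32(VM_CONTEXT0_REQUEST_RESPONSE, REQUEST_TYPE(1));

	/* poll the response field until the MC answers or we give up */
	for (i = 0; i < rdev->usec_timeout; i++) {
		resp = RREG32(VM_CONTEXT0_REQUEST_RESPONSE);
		resp = (resp & RESPONSE_TYPE_MASK) >> RESPONSE_TYPE_SHIFT;
		if (resp == 2) {
			/* assumed meaning: the flush request was rejected */
			printk(KERN_WARNING "[drm] evergreen TLB flush failed\n");
			return;
		}
		if (resp)
			return;	/* assumed meaning: flush completed */
		udelay(1);	/* back off 1us between polls */
	}
}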
Diffstat (limited to 'drivers/gpu/drm/radeon/evergreen.c')
-rw-r--r--	drivers/gpu/drm/radeon/evergreen.c	58
1 file changed, 39 insertions(+), 19 deletions(-)
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index 7672f11ed995..afcff06ef291 100644
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -27,7 +27,7 @@
 #include "radeon.h"
 #include "radeon_asic.h"
 #include "radeon_drm.h"
-#include "rv770d.h"
+#include "evergreend.h"
 #include "atom.h"
 #include "avivod.h"
 #include "evergreen_reg.h"
@@ -82,10 +82,31 @@ static int evergreen_mc_wait_for_idle(struct radeon_device *rdev)
 /*
  * GART
  */
+void evergreen_pcie_gart_tlb_flush(struct radeon_device *rdev)
+{
+	unsigned i;
+	u32 tmp;
+
+	WREG32(VM_CONTEXT0_REQUEST_RESPONSE, REQUEST_TYPE(1));
+	for (i = 0; i < rdev->usec_timeout; i++) {
+		/* read MC_STATUS */
+		tmp = RREG32(VM_CONTEXT0_REQUEST_RESPONSE);
+		tmp = (tmp & RESPONSE_TYPE_MASK) >> RESPONSE_TYPE_SHIFT;
+		if (tmp == 2) {
+			printk(KERN_WARNING "[drm] r600 flush TLB failed\n");
+			return;
+		}
+		if (tmp) {
+			return;
+		}
+		udelay(1);
+	}
+}
+
 int evergreen_pcie_gart_enable(struct radeon_device *rdev)
 {
 	u32 tmp;
-	int r, i;
+	int r;
 
 	if (rdev->gart.table.vram.robj == NULL) {
 		dev_err(rdev->dev, "No VRAM object for PCIE GART.\n");
@@ -120,10 +141,9 @@ int evergreen_pcie_gart_enable(struct radeon_device *rdev)
 		RANGE_PROTECTION_FAULT_ENABLE_DEFAULT);
 	WREG32(VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR,
 			(u32)(rdev->dummy_page.addr >> 12));
-	for (i = 1; i < 7; i++)
-		WREG32(VM_CONTEXT0_CNTL + (i * 4), 0);
+	WREG32(VM_CONTEXT1_CNTL, 0);
 
-	r600_pcie_gart_tlb_flush(rdev);
+	evergreen_pcie_gart_tlb_flush(rdev);
 	rdev->gart.ready = true;
 	return 0;
 }
@@ -131,11 +151,11 @@ int evergreen_pcie_gart_enable(struct radeon_device *rdev)
 void evergreen_pcie_gart_disable(struct radeon_device *rdev)
 {
 	u32 tmp;
-	int i, r;
+	int r;
 
 	/* Disable all tables */
-	for (i = 0; i < 7; i++)
-		WREG32(VM_CONTEXT0_CNTL + (i * 4), 0);
+	WREG32(VM_CONTEXT0_CNTL, 0);
+	WREG32(VM_CONTEXT1_CNTL, 0);
 
 	/* Setup L2 cache */
 	WREG32(VM_L2_CNTL, ENABLE_L2_FRAGMENT_PROCESSING |
@@ -172,7 +192,6 @@ void evergreen_pcie_gart_fini(struct radeon_device *rdev)
 void evergreen_agp_enable(struct radeon_device *rdev)
 {
 	u32 tmp;
-	int i;
 
 	/* Setup L2 cache */
 	WREG32(VM_L2_CNTL, ENABLE_L2_CACHE | ENABLE_L2_FRAGMENT_PROCESSING |
@@ -192,8 +211,8 @@ void evergreen_agp_enable(struct radeon_device *rdev)
 	WREG32(MC_VM_MB_L1_TLB1_CNTL, tmp);
 	WREG32(MC_VM_MB_L1_TLB2_CNTL, tmp);
 	WREG32(MC_VM_MB_L1_TLB3_CNTL, tmp);
-	for (i = 0; i < 7; i++)
-		WREG32(VM_CONTEXT0_CNTL + (i * 4), 0);
+	WREG32(VM_CONTEXT0_CNTL, 0);
+	WREG32(VM_CONTEXT1_CNTL, 0);
 }
 
 static void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *save)
@@ -500,9 +519,9 @@ int evergreen_asic_reset(struct radeon_device *rdev)
 
 static int evergreen_startup(struct radeon_device *rdev)
 {
-#if 0
 	int r;
 
+#if 0
 	if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) {
 		r = r600_init_microcode(rdev);
 		if (r) {
@@ -512,15 +531,13 @@ static int evergreen_startup(struct radeon_device *rdev)
 	}
 #endif
 	evergreen_mc_program(rdev);
-#if 0
 	if (rdev->flags & RADEON_IS_AGP) {
-		evergreem_agp_enable(rdev);
+		evergreen_agp_enable(rdev);
 	} else {
 		r = evergreen_pcie_gart_enable(rdev);
 		if (r)
 			return r;
 	}
-#endif
 	evergreen_gpu_init(rdev);
 #if 0
 	if (!rdev->r600_blit.shader_obj) {
@@ -607,7 +624,10 @@ int evergreen_suspend(struct radeon_device *rdev)
 	r700_cp_stop(rdev);
 	rdev->cp.ready = false;
 	r600_wb_disable(rdev);
+#endif
+
 	evergreen_pcie_gart_disable(rdev);
+#if 0
 	/* unpin shaders bo */
 	r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
 	if (likely(r == 0)) {
@@ -717,18 +737,18 @@ int evergreen_init(struct radeon_device *rdev)
 
 	rdev->ih.ring_obj = NULL;
 	r600_ih_ring_init(rdev, 64 * 1024);
-
+#endif
 	r = r600_pcie_gart_init(rdev);
 	if (r)
 		return r;
-#endif
+
 	rdev->accel_working = false;
 	r = evergreen_startup(rdev);
 	if (r) {
 		evergreen_suspend(rdev);
 		/*r600_wb_fini(rdev);*/
 		/*radeon_ring_fini(rdev);*/
-		/*evergreen_pcie_gart_fini(rdev);*/
+		evergreen_pcie_gart_fini(rdev);
 		rdev->accel_working = false;
 	}
 	if (rdev->accel_working) {
@@ -756,8 +776,8 @@ void evergreen_fini(struct radeon_device *rdev)
 	radeon_irq_kms_fini(rdev);
 	radeon_ring_fini(rdev);
 	r600_wb_fini(rdev);
-	evergreen_pcie_gart_fini(rdev);
 #endif
+	evergreen_pcie_gart_fini(rdev);
 	radeon_gem_fini(rdev);
 	radeon_fence_driver_fini(rdev);
 	radeon_clocks_fini(rdev);
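
Taken together, the startup/suspend/init/fini hunks also move the GART calls out of the #if 0 stubs, so the page table is now actually allocated, enabled, disabled and torn down across the driver lifecycle. The sketch below is a hypothetical wrapper, purely for illustration, showing the order in which the driver functions named in the diff end up being called; error handling and the still-disabled acceleration setup are omitted.

/* Illustration only: this wrapper does not exist in the driver.  It just
 * traces the GART-related calls the patch wires up. */
static int example_evergreen_gart_bringup(struct radeon_device *rdev)
{
	int r;

	/* evergreen_init(): allocate the page table object in VRAM */
	r = r600_pcie_gart_init(rdev);
	if (r)
		return r;

	/* evergreen_startup(): program the MC, then bring up the aperture */
	evergreen_mc_program(rdev);
	if (rdev->flags & RADEON_IS_AGP)
		evergreen_agp_enable(rdev);		/* AGP: no page table needed */
	else
		r = evergreen_pcie_gart_enable(rdev);	/* VM contexts + TLB flush */

	/* evergreen_suspend() later calls evergreen_pcie_gart_disable(), and
	 * evergreen_fini() calls evergreen_pcie_gart_fini() to free the table. */
	return r;
}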