author    Andrea Bastoni <bastoni@cs.unc.edu>  2010-05-30 19:16:45 -0400
committer Andrea Bastoni <bastoni@cs.unc.edu>  2010-05-30 19:16:45 -0400
commit    ada47b5fe13d89735805b566185f4885f5a3f750 (patch)
tree      644b88f8a71896307d71438e9b3af49126ffb22b /drivers/gpu/drm/radeon/r300.c
parent    43e98717ad40a4ae64545b5ba047c7b86aa44f4f (diff)
parent    3280f21d43ee541f97f8cda5792150d2dbec20d5 (diff)

Merge branch 'wip-2.6.34' into old-private-master (archived-private-master)
Diffstat (limited to 'drivers/gpu/drm/radeon/r300.c')
-rw-r--r--  drivers/gpu/drm/radeon/r300.c  302
1 file changed, 198 insertions(+), 104 deletions(-)
diff --git a/drivers/gpu/drm/radeon/r300.c b/drivers/gpu/drm/radeon/r300.c
index 2f43ee8e4048..a5ff8076b423 100644
--- a/drivers/gpu/drm/radeon/r300.c
+++ b/drivers/gpu/drm/radeon/r300.c
@@ -26,17 +26,27 @@
  * Jerome Glisse
  */
 #include <linux/seq_file.h>
+#include <linux/slab.h>
 #include "drmP.h"
 #include "drm.h"
 #include "radeon_reg.h"
 #include "radeon.h"
+#include "radeon_asic.h"
 #include "radeon_drm.h"
 #include "r100_track.h"
 #include "r300d.h"
 #include "rv350d.h"
 #include "r300_reg_safe.h"
 
-/* This files gather functions specifics to: r300,r350,rv350,rv370,rv380 */
+/* This files gather functions specifics to: r300,r350,rv350,rv370,rv380
+ *
+ * GPU Errata:
+ * - HOST_PATH_CNTL: r300 family seems to dislike write to HOST_PATH_CNTL
+ *   using MMIO to flush host path read cache, this lead to HARDLOCKUP.
+ *   However, scheduling such write to the ring seems harmless, i suspect
+ *   the CP read collide with the flush somehow, or maybe the MC, hard to
+ *   tell. (Jerome Glisse)
+ */
 
 /*
  * rv370,rv380 PCIE GART
@@ -109,18 +119,19 @@ int rv370_pcie_gart_enable(struct radeon_device *rdev)
         r = radeon_gart_table_vram_pin(rdev);
         if (r)
                 return r;
+        radeon_gart_restore(rdev);
         /* discard memory request outside of configured range */
         tmp = RADEON_PCIE_TX_GART_UNMAPPED_ACCESS_DISCARD;
         WREG32_PCIE(RADEON_PCIE_TX_GART_CNTL, tmp);
-        WREG32_PCIE(RADEON_PCIE_TX_GART_START_LO, rdev->mc.gtt_location);
-        tmp = rdev->mc.gtt_location + rdev->mc.gtt_size - RADEON_GPU_PAGE_SIZE;
+        WREG32_PCIE(RADEON_PCIE_TX_GART_START_LO, rdev->mc.gtt_start);
+        tmp = rdev->mc.gtt_end & ~RADEON_GPU_PAGE_MASK;
         WREG32_PCIE(RADEON_PCIE_TX_GART_END_LO, tmp);
         WREG32_PCIE(RADEON_PCIE_TX_GART_START_HI, 0);
         WREG32_PCIE(RADEON_PCIE_TX_GART_END_HI, 0);
         table_addr = rdev->gart.table_addr;
         WREG32_PCIE(RADEON_PCIE_TX_GART_BASE, table_addr);
         /* FIXME: setup default page */
-        WREG32_PCIE(RADEON_PCIE_TX_DISCARD_RD_ADDR_LO, rdev->mc.vram_location);
+        WREG32_PCIE(RADEON_PCIE_TX_DISCARD_RD_ADDR_LO, rdev->mc.vram_start);
         WREG32_PCIE(RADEON_PCIE_TX_DISCARD_RD_ADDR_HI, 0);
         /* Clear error */
         WREG32_PCIE(0x18, 0);
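The new range programming is equivalent to the old arithmetic: with gtt_end = gtt_start + gtt_size - 1, masking off the low page bits lands on the same last-page address that gtt_location + gtt_size - RADEON_GPU_PAGE_SIZE produced. A standalone sketch of that arithmetic, using illustrative values and local macros rather than the driver's real struct fields:

    #include <stdint.h>
    #include <stdio.h>

    #define GPU_PAGE_SIZE 4096u
    #define GPU_PAGE_MASK (GPU_PAGE_SIZE - 1)

    int main(void)
    {
        uint32_t gtt_start = 32u << 20;          /* hypothetical 32MB offset */
        uint32_t gtt_size  = 64u << 20;          /* hypothetical 64MB window */
        uint32_t gtt_end   = gtt_start + gtt_size - 1;

        /* TX_GART_END_LO wants the last page-aligned address in the window */
        printf("old END_LO = 0x%08x\n", gtt_start + gtt_size - GPU_PAGE_SIZE);
        printf("new END_LO = 0x%08x\n", gtt_end & ~GPU_PAGE_MASK);
        return 0;
    }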
@@ -137,22 +148,27 @@ int rv370_pcie_gart_enable(struct radeon_device *rdev)
 
 void rv370_pcie_gart_disable(struct radeon_device *rdev)
 {
-        uint32_t tmp;
+        u32 tmp;
+        int r;
 
         tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_CNTL);
         tmp |= RADEON_PCIE_TX_GART_UNMAPPED_ACCESS_DISCARD;
         WREG32_PCIE(RADEON_PCIE_TX_GART_CNTL, tmp & ~RADEON_PCIE_TX_GART_EN);
         if (rdev->gart.table.vram.robj) {
-                radeon_object_kunmap(rdev->gart.table.vram.robj);
-                radeon_object_unpin(rdev->gart.table.vram.robj);
+                r = radeon_bo_reserve(rdev->gart.table.vram.robj, false);
+                if (likely(r == 0)) {
+                        radeon_bo_kunmap(rdev->gart.table.vram.robj);
+                        radeon_bo_unpin(rdev->gart.table.vram.robj);
+                        radeon_bo_unreserve(rdev->gart.table.vram.robj);
+                }
         }
 }
 
 void rv370_pcie_gart_fini(struct radeon_device *rdev)
 {
+        radeon_gart_fini(rdev);
         rv370_pcie_gart_disable(rdev);
         radeon_gart_table_vram_free(rdev);
-        radeon_gart_fini(rdev);
 }
 
 void r300_fence_ring_emit(struct radeon_device *rdev,
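The disable path now follows the reserve/operate/unreserve discipline of the new radeon_bo API: the object must be reserved before kunmap/unpin, released afterwards, and the teardown is skipped if the reservation fails. A toy userspace model of just that ordering, with a mutex standing in for the reservation; the radeon_bo_* names appear only in comments:

    #include <stdio.h>
    #include <pthread.h>

    static pthread_mutex_t bo_lock = PTHREAD_MUTEX_INITIALIZER;

    static void bo_teardown(void)
    {
        if (pthread_mutex_trylock(&bo_lock) == 0) {  /* radeon_bo_reserve */
            puts("kunmap");                          /* radeon_bo_kunmap */
            puts("unpin");                           /* radeon_bo_unpin */
            pthread_mutex_unlock(&bo_lock);          /* radeon_bo_unreserve */
        }                                            /* else: skip, as likely(r == 0) does */
    }

    int main(void) { bo_teardown(); return 0; }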
@@ -161,18 +177,25 @@ void r300_fence_ring_emit(struct radeon_device *rdev,
         /* Who ever call radeon_fence_emit should call ring_lock and ask
          * for enough space (today caller are ib schedule and buffer move) */
         /* Write SC register so SC & US assert idle */
-        radeon_ring_write(rdev, PACKET0(0x43E0, 0));
+        radeon_ring_write(rdev, PACKET0(R300_RE_SCISSORS_TL, 0));
         radeon_ring_write(rdev, 0);
-        radeon_ring_write(rdev, PACKET0(0x43E4, 0));
+        radeon_ring_write(rdev, PACKET0(R300_RE_SCISSORS_BR, 0));
         radeon_ring_write(rdev, 0);
         /* Flush 3D cache */
-        radeon_ring_write(rdev, PACKET0(0x4E4C, 0));
-        radeon_ring_write(rdev, (2 << 0));
-        radeon_ring_write(rdev, PACKET0(0x4F18, 0));
-        radeon_ring_write(rdev, (1 << 0));
+        radeon_ring_write(rdev, PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0));
+        radeon_ring_write(rdev, R300_RB3D_DC_FLUSH);
+        radeon_ring_write(rdev, PACKET0(R300_RB3D_ZCACHE_CTLSTAT, 0));
+        radeon_ring_write(rdev, R300_ZC_FLUSH);
         /* Wait until IDLE & CLEAN */
-        radeon_ring_write(rdev, PACKET0(0x1720, 0));
-        radeon_ring_write(rdev, (1 << 17) | (1 << 16) | (1 << 9));
+        radeon_ring_write(rdev, PACKET0(RADEON_WAIT_UNTIL, 0));
+        radeon_ring_write(rdev, (RADEON_WAIT_3D_IDLECLEAN |
+                                 RADEON_WAIT_2D_IDLECLEAN |
+                                 RADEON_WAIT_DMA_GUI_IDLE));
+        radeon_ring_write(rdev, PACKET0(RADEON_HOST_PATH_CNTL, 0));
+        radeon_ring_write(rdev, rdev->config.r300.hdp_cntl |
+                                RADEON_HDP_READ_BUFFER_INVALIDATE);
+        radeon_ring_write(rdev, PACKET0(RADEON_HOST_PATH_CNTL, 0));
+        radeon_ring_write(rdev, rdev->config.r300.hdp_cntl);
         /* Emit fence sequence & fire IRQ */
         radeon_ring_write(rdev, PACKET0(rdev->fence_drv.scratch_reg, 0));
         radeon_ring_write(rdev, fence->seq);
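This hunk is the workaround described by the errata note added to the file header: rather than flushing the host path read cache with an MMIO write to HOST_PATH_CNTL (reported to hard-lock r300 boards), the fence path schedules the invalidate-then-restore pair on the CP ring after the idle wait. A userspace sketch of what gets queued, with an array standing in for the ring and a simplified packet header; the real PACKET0 macro also encodes a count field:

    #include <stdint.h>
    #include <stdio.h>

    #define HOST_PATH_CNTL             0x0130u    /* offset per radeon_reg.h */
    #define HDP_READ_BUFFER_INVALIDATE (1u << 27)
    #define PACKET0(reg)               ((reg) >> 2) /* simplified header */

    static uint32_t ring[8];
    static unsigned wptr;

    static void ring_write(uint32_t v) { ring[wptr++] = v; }

    int main(void)
    {
        uint32_t hdp_cntl = 0x00005000u;  /* value cached at startup */

        ring_write(PACKET0(HOST_PATH_CNTL));
        ring_write(hdp_cntl | HDP_READ_BUFFER_INVALIDATE); /* kick the flush */
        ring_write(PACKET0(HOST_PATH_CNTL));
        ring_write(hdp_cntl);                              /* restore saved value */

        for (unsigned i = 0; i < wptr; i++)
            printf("ring[%u] = 0x%08x\n", i, ring[i]);
        return 0;
    }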
@@ -180,50 +203,6 @@ void r300_fence_ring_emit(struct radeon_device *rdev,
         radeon_ring_write(rdev, RADEON_SW_INT_FIRE);
 }
 
-int r300_copy_dma(struct radeon_device *rdev,
-                  uint64_t src_offset,
-                  uint64_t dst_offset,
-                  unsigned num_pages,
-                  struct radeon_fence *fence)
-{
-        uint32_t size;
-        uint32_t cur_size;
-        int i, num_loops;
-        int r = 0;
-
-        /* radeon pitch is /64 */
-        size = num_pages << PAGE_SHIFT;
-        num_loops = DIV_ROUND_UP(size, 0x1FFFFF);
-        r = radeon_ring_lock(rdev, num_loops * 4 + 64);
-        if (r) {
-                DRM_ERROR("radeon: moving bo (%d).\n", r);
-                return r;
-        }
-        /* Must wait for 2D idle & clean before DMA or hangs might happen */
-        radeon_ring_write(rdev, PACKET0(RADEON_WAIT_UNTIL, 0 ));
-        radeon_ring_write(rdev, (1 << 16));
-        for (i = 0; i < num_loops; i++) {
-                cur_size = size;
-                if (cur_size > 0x1FFFFF) {
-                        cur_size = 0x1FFFFF;
-                }
-                size -= cur_size;
-                radeon_ring_write(rdev, PACKET0(0x720, 2));
-                radeon_ring_write(rdev, src_offset);
-                radeon_ring_write(rdev, dst_offset);
-                radeon_ring_write(rdev, cur_size | (1 << 31) | (1 << 30));
-                src_offset += cur_size;
-                dst_offset += cur_size;
-        }
-        radeon_ring_write(rdev, PACKET0(RADEON_WAIT_UNTIL, 0));
-        radeon_ring_write(rdev, RADEON_WAIT_DMA_GUI_IDLE);
-        if (fence) {
-                r = radeon_fence_emit(rdev, fence);
-        }
-        radeon_ring_unlock_commit(rdev);
-        return r;
-}
-
 void r300_ring_start(struct radeon_device *rdev)
 {
         unsigned gb_tile_config;
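For reference, the deleted r300_copy_dma split each copy into chunks because the DMA packet carries a 21-bit byte count, hence the 0x1FFFFF cap and the DIV_ROUND_UP loop count. The chunking arithmetic in isolation:

    #include <stdio.h>

    int main(void)
    {
        unsigned size = 5u << 20;                     /* a 5MB copy */
        unsigned max  = 0x1FFFFF;                     /* 21-bit size field */
        unsigned num_loops = (size + max - 1) / max;  /* DIV_ROUND_UP */
        printf("%u bytes -> %u DMA packets\n", size, num_loops);
        return 0;
    }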
@@ -263,8 +242,8 @@ void r300_ring_start(struct radeon_device *rdev)
         radeon_ring_write(rdev,
                           RADEON_WAIT_2D_IDLECLEAN |
                           RADEON_WAIT_3D_IDLECLEAN);
-        radeon_ring_write(rdev, PACKET0(0x170C, 0));
-        radeon_ring_write(rdev, 1 << 31);
+        radeon_ring_write(rdev, PACKET0(R300_DST_PIPE_CONFIG, 0));
+        radeon_ring_write(rdev, R300_PIPE_AUTO_CONFIG);
         radeon_ring_write(rdev, PACKET0(R300_GB_SELECT, 0));
         radeon_ring_write(rdev, 0);
         radeon_ring_write(rdev, PACKET0(R300_GB_ENABLE, 0));
@@ -331,8 +310,8 @@ int r300_mc_wait_for_idle(struct radeon_device *rdev)
 
         for (i = 0; i < rdev->usec_timeout; i++) {
                 /* read MC_STATUS */
-                tmp = RREG32(0x0150);
-                if (tmp & (1 << 4)) {
+                tmp = RREG32(RADEON_MC_STATUS);
+                if (tmp & R300_MC_IDLE) {
                         return 0;
                 }
                 DRM_UDELAY(1);
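The magic numbers become symbolic, but the structure is unchanged: a bounded poll of MC_STATUS, one microsecond per iteration, returning 0 as soon as the idle bit (bit 4, as the replaced (1 << 4) shows) is set. The same shape with a stubbed register read:

    #include <stdint.h>
    #include <stdio.h>

    #define R300_MC_IDLE (1u << 4)

    static uint32_t read_mc_status(int tick)
    {
        return tick > 3 ? R300_MC_IDLE : 0;   /* pretend MC goes idle at t=4 */
    }

    static int mc_wait_for_idle(int usec_timeout)
    {
        for (int i = 0; i < usec_timeout; i++) {
            if (read_mc_status(i) & R300_MC_IDLE)
                return 0;                     /* idle */
            /* DRM_UDELAY(1) in the driver */
        }
        return -1;                            /* timed out */
    }

    int main(void)
    {
        printf("mc_wait_for_idle -> %d\n", mc_wait_for_idle(100));
        return 0;
    }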
@@ -345,12 +324,12 @@ void r300_gpu_init(struct radeon_device *rdev)
         uint32_t gb_tile_config, tmp;
 
         r100_hdp_reset(rdev);
-        /* FIXME: rv380 one pipes ? */
-        if ((rdev->family == CHIP_R300) || (rdev->family == CHIP_R350)) {
+        if ((rdev->family == CHIP_R300 && rdev->pdev->device != 0x4144) ||
+            (rdev->family == CHIP_R350 && rdev->pdev->device != 0x4148)) {
                 /* r300,r350 */
                 rdev->num_gb_pipes = 2;
         } else {
-                /* rv350,rv370,rv380 */
+                /* rv350,rv370,rv380,r300 AD, r350 AH */
                 rdev->num_gb_pipes = 1;
         }
         rdev->num_z_pipes = 1;
@@ -377,8 +356,8 @@ void r300_gpu_init(struct radeon_device *rdev)
377 "programming pipes. Bad things might happen.\n"); 356 "programming pipes. Bad things might happen.\n");
378 } 357 }
379 358
380 tmp = RREG32(0x170C); 359 tmp = RREG32(R300_DST_PIPE_CONFIG);
381 WREG32(0x170C, tmp | (1 << 31)); 360 WREG32(R300_DST_PIPE_CONFIG, tmp | R300_PIPE_AUTO_CONFIG);
382 361
383 WREG32(R300_RB2D_DSTCACHE_MODE, 362 WREG32(R300_RB2D_DSTCACHE_MODE,
384 R300_DC_AUTOFLUSH_ENABLE | 363 R300_DC_AUTOFLUSH_ENABLE |
@@ -419,8 +398,8 @@ int r300_ga_reset(struct radeon_device *rdev)
                         /* GA still busy soft reset it */
                         WREG32(0x429C, 0x200);
                         WREG32(R300_VAP_PVS_STATE_FLUSH_REG, 0);
-                        WREG32(0x43E0, 0);
-                        WREG32(0x43E4, 0);
+                        WREG32(R300_RE_SCISSORS_TL, 0);
+                        WREG32(R300_RE_SCISSORS_BR, 0);
                         WREG32(0x24AC, 0);
                 }
                 /* Wait to prevent race in RBBM_STATUS */
@@ -470,7 +449,7 @@ int r300_gpu_reset(struct radeon_device *rdev)
         }
         /* Check if GPU is idle */
         status = RREG32(RADEON_RBBM_STATUS);
-        if (status & (1 << 31)) {
+        if (status & RADEON_RBBM_ACTIVE) {
                 DRM_ERROR("Failed to reset GPU (RBBM_STATUS=0x%08X)\n", status);
                 return -1;
         }
@@ -482,20 +461,29 @@ int r300_gpu_reset(struct radeon_device *rdev)
 /*
  * r300,r350,rv350,rv380 VRAM info
  */
-void r300_vram_info(struct radeon_device *rdev)
+void r300_mc_init(struct radeon_device *rdev)
 {
-        uint32_t tmp;
+        u64 base;
+        u32 tmp;
 
         /* DDR for all card after R300 & IGP */
         rdev->mc.vram_is_ddr = true;
         tmp = RREG32(RADEON_MEM_CNTL);
-        if (tmp & R300_MEM_NUM_CHANNELS_MASK) {
-                rdev->mc.vram_width = 128;
-        } else {
-                rdev->mc.vram_width = 64;
+        tmp &= R300_MEM_NUM_CHANNELS_MASK;
+        switch (tmp) {
+        case 0: rdev->mc.vram_width = 64; break;
+        case 1: rdev->mc.vram_width = 128; break;
+        case 2: rdev->mc.vram_width = 256; break;
+        default: rdev->mc.vram_width = 128; break;
         }
-
         r100_vram_init_sizes(rdev);
+        base = rdev->mc.aper_base;
+        if (rdev->flags & RADEON_IS_IGP)
+                base = (RREG32(RADEON_NB_TOM) & 0xffff) << 16;
+        radeon_vram_location(rdev, &rdev->mc, base);
+        if (!(rdev->flags & RADEON_IS_AGP))
+                radeon_gtt_location(rdev, &rdev->mc);
+        radeon_update_bandwidth_info(rdev);
 }
 
 void rv370_set_pcie_lanes(struct radeon_device *rdev, int lanes)
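r300_mc_init now decodes the channel-count field into a bus width and, on IGP parts, takes the VRAM base from NB_TOM rather than the PCI aperture; the low 16 bits of NB_TOM give the start of stolen memory in 64KB units. Both decodes as standalone arithmetic, with made-up register readback values:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t mem_cntl = 0x01;           /* pretend MEM_CNTL readback */
        uint32_t tmp = mem_cntl & 0x3;      /* R300_MEM_NUM_CHANNELS_MASK */
        int vram_width;

        switch (tmp) {
        case 0:  vram_width = 64;  break;
        case 1:  vram_width = 128; break;
        case 2:  vram_width = 256; break;
        default: vram_width = 128; break;
        }

        uint32_t nb_tom = 0x3fff0010u;      /* pretend NB_TOM readback */
        uint64_t igp_base = (uint64_t)(nb_tom & 0xffff) << 16; /* 64KB units */

        printf("vram_width = %d bits\n", vram_width);
        printf("IGP vram base = 0x%llx\n", (unsigned long long)igp_base);
        return 0;
    }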
@@ -557,6 +545,40 @@ void rv370_set_pcie_lanes(struct radeon_device *rdev, int lanes)
 
 }
 
+int rv370_get_pcie_lanes(struct radeon_device *rdev)
+{
+        u32 link_width_cntl;
+
+        if (rdev->flags & RADEON_IS_IGP)
+                return 0;
+
+        if (!(rdev->flags & RADEON_IS_PCIE))
+                return 0;
+
+        /* FIXME wait for idle */
+
+        if (rdev->family < CHIP_R600)
+                link_width_cntl = RREG32_PCIE(RADEON_PCIE_LC_LINK_WIDTH_CNTL);
+        else
+                link_width_cntl = RREG32_PCIE_P(RADEON_PCIE_LC_LINK_WIDTH_CNTL);
+
+        switch ((link_width_cntl & RADEON_PCIE_LC_LINK_WIDTH_RD_MASK) >> RADEON_PCIE_LC_LINK_WIDTH_RD_SHIFT) {
+        case RADEON_PCIE_LC_LINK_WIDTH_X0:
+                return 0;
+        case RADEON_PCIE_LC_LINK_WIDTH_X1:
+                return 1;
+        case RADEON_PCIE_LC_LINK_WIDTH_X2:
+                return 2;
+        case RADEON_PCIE_LC_LINK_WIDTH_X4:
+                return 4;
+        case RADEON_PCIE_LC_LINK_WIDTH_X8:
+                return 8;
+        case RADEON_PCIE_LC_LINK_WIDTH_X16:
+        default:
+                return 16;
+        }
+}
+
 #if defined(CONFIG_DEBUG_FS)
 static int rv370_debugfs_pcie_gart_info(struct seq_file *m, void *data)
 {
@@ -681,7 +703,17 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
                         r100_cs_dump_packet(p, pkt);
                         return r;
                 }
-                ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
+
+                if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
+                        tile_flags |= R300_TXO_MACRO_TILE;
+                if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
+                        tile_flags |= R300_TXO_MICRO_TILE;
+                else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO_SQUARE)
+                        tile_flags |= R300_TXO_MICRO_TILE_SQUARE;
+
+                tmp = idx_value + ((u32)reloc->lobj.gpu_offset);
+                tmp |= tile_flags;
+                ib[idx] = tmp;
                 track->textures[i].robj = reloc->robj;
                 break;
         /* Tracked registers */
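Texture relocations now fold tiling state into the same dword as the relocated address: the flags are accumulated first, then OR'd over the offset sum. The combining step in isolation, with placeholder bit positions rather than the real R300_TXO_* values:

    #include <stdint.h>
    #include <stdio.h>

    #define TXO_MACRO_TILE (1u << 3)   /* placeholder bit, not the r300 value */
    #define TXO_MICRO_TILE (1u << 4)   /* placeholder bit, not the r300 value */

    int main(void)
    {
        uint32_t idx_value  = 0x00100000;  /* offset from the command stream */
        uint32_t gpu_offset = 0x08000000;  /* where the BO actually landed */
        uint32_t tile_flags = TXO_MACRO_TILE;

        uint32_t tmp = idx_value + gpu_offset;
        tmp |= tile_flags;
        printf("patched dword = 0x%08x\n", tmp);
        return 0;
    }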
@@ -697,6 +729,12 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
                 /* VAP_VF_MAX_VTX_INDX */
                 track->max_indx = idx_value & 0x00FFFFFFUL;
                 break;
+        case 0x2088:
+                /* VAP_ALT_NUM_VERTICES - only valid on r500 */
+                if (p->rdev->family < CHIP_RV515)
+                        goto fail;
+                track->vap_alt_nverts = idx_value & 0xFFFFFF;
+                break;
         case 0x43E4:
                 /* SC_SCISSOR1 */
                 track->maxy = ((idx_value >> 13) & 0x1FFF) + 1;
@@ -728,11 +766,12 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
                         tile_flags |= R300_COLOR_TILE_ENABLE;
                 if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
                         tile_flags |= R300_COLOR_MICROTILE_ENABLE;
+                else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO_SQUARE)
+                        tile_flags |= R300_COLOR_MICROTILE_SQUARE_ENABLE;
 
                 tmp = idx_value & ~(0x7 << 16);
                 tmp |= tile_flags;
                 ib[idx] = tmp;
-
                 i = (reg - 0x4E38) >> 2;
                 track->cb[i].pitch = idx_value & 0x3FFE;
                 switch (((idx_value >> 21) & 0xF)) {
@@ -799,7 +838,9 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
                 if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
                         tile_flags |= R300_DEPTHMACROTILE_ENABLE;
                 if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
-                        tile_flags |= R300_DEPTHMICROTILE_TILED;;
+                        tile_flags |= R300_DEPTHMICROTILE_TILED;
+                else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO_SQUARE)
+                        tile_flags |= R300_DEPTHMICROTILE_TILED_SQUARE;
 
                 tmp = idx_value & ~(0x7 << 16);
                 tmp |= tile_flags;
@@ -847,7 +888,6 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
         case R300_TX_FORMAT_Z6Y5X5:
         case R300_TX_FORMAT_W4Z4Y4X4:
         case R300_TX_FORMAT_W1Z5Y5X5:
-        case R300_TX_FORMAT_DXT1:
         case R300_TX_FORMAT_D3DMFT_CxV8U8:
         case R300_TX_FORMAT_B8G8_B8G8:
         case R300_TX_FORMAT_G8R8_G8B8:
@@ -861,8 +901,6 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
         case 0x17:
         case R300_TX_FORMAT_FL_I32:
         case 0x1e:
-        case R300_TX_FORMAT_DXT3:
-        case R300_TX_FORMAT_DXT5:
                 track->textures[i].cpp = 4;
                 break;
         case R300_TX_FORMAT_W16Z16Y16X16:
@@ -873,6 +911,23 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
         case R300_TX_FORMAT_FL_R32G32B32A32:
                 track->textures[i].cpp = 16;
                 break;
+        case R300_TX_FORMAT_DXT1:
+                track->textures[i].cpp = 1;
+                track->textures[i].compress_format = R100_TRACK_COMP_DXT1;
+                break;
+        case R300_TX_FORMAT_ATI2N:
+                if (p->rdev->family < CHIP_R420) {
+                        DRM_ERROR("Invalid texture format %u\n",
+                                  (idx_value & 0x1F));
+                        return -EINVAL;
+                }
+                /* The same rules apply as for DXT3/5. */
+                /* Pass through. */
+        case R300_TX_FORMAT_DXT3:
+        case R300_TX_FORMAT_DXT5:
+                track->textures[i].cpp = 1;
+                track->textures[i].compress_format = R100_TRACK_COMP_DXT35;
+                break;
         default:
                 DRM_ERROR("Invalid texture format %u\n",
                           (idx_value & 0x1F));
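The compressed formats get cpp = 1 plus a compress_format tag because their cost is per 4x4 block: 16 bytes for DXT3/5 (one byte per texel) and 8 bytes for DXT1 (half a byte per texel), which a whole-number cpp alone cannot express. The size math:

    #include <stdio.h>

    static unsigned dxt_size(unsigned w, unsigned h, unsigned block_bytes)
    {
        unsigned bw = (w + 3) / 4, bh = (h + 3) / 4;   /* 4x4 blocks per side */
        return bw * bh * block_bytes;
    }

    int main(void)
    {
        printf("256x256 DXT1  : %u bytes\n", dxt_size(256, 256, 8));
        printf("256x256 DXT3/5: %u bytes\n", dxt_size(256, 256, 16));
        return 0;
    }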
@@ -932,6 +987,16 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
                         track->textures[i].width_11 = tmp;
                         tmp = ((idx_value >> 16) & 1) << 11;
                         track->textures[i].height_11 = tmp;
+
+                        /* ATI1N */
+                        if (idx_value & (1 << 14)) {
+                                /* The same rules apply as for DXT1. */
+                                track->textures[i].compress_format =
+                                        R100_TRACK_COMP_DXT1;
+                        }
+                } else if (idx_value & (1 << 14)) {
+                        DRM_ERROR("Forbidden bit TXFORMAT_MSB\n");
+                        return -EINVAL;
                 }
                 break;
         case 0x4480:
@@ -973,17 +1038,31 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
                 }
                 ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
                 break;
+        case 0x4e0c:
+                /* RB3D_COLOR_CHANNEL_MASK */
+                track->color_channel_mask = idx_value;
+                break;
+        case 0x4d1c:
+                /* ZB_BW_CNTL */
+                track->zb_cb_clear = !!(idx_value & (1 << 5));
+                break;
+        case 0x4e04:
+                /* RB3D_BLENDCNTL */
+                track->blend_read_enable = !!(idx_value & (1 << 2));
+                break;
         case 0x4be8:
                 /* valid register only on RV530 */
                 if (p->rdev->family == CHIP_RV530)
                         break;
                 /* fallthrough do not move */
         default:
-                printk(KERN_ERR "Forbidden register 0x%04X in cs at %d\n",
-                       reg, idx);
-                return -EINVAL;
+                goto fail;
         }
         return 0;
+fail:
+        printk(KERN_ERR "Forbidden register 0x%04X in cs at %d\n",
+               reg, idx);
+        return -EINVAL;
 }
 
 static int r300_packet3_check(struct radeon_cs_parser *p,
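Funneling every rejection through one fail label lets new validity checks, like the r500-only 0x2088 case added above, bail out with a single goto instead of duplicating the printk/return pair. The bare shape of the pattern, with -EINVAL written out:

    #include <stdio.h>

    static int check_reg(unsigned reg, int family)
    {
        switch (reg) {
        case 0x2088:                 /* r500-only register */
            if (family < 515)
                goto fail;
            return 0;
        default:
            goto fail;
        }
    fail:
        fprintf(stderr, "Forbidden register 0x%04X\n", reg);
        return -22;                  /* -EINVAL */
    }

    int main(void)
    {
        printf("r420: %d\n", check_reg(0x2088, 420));   /* -22, forbidden */
        printf("r515: %d\n", check_reg(0x2088, 515));   /* 0, accepted */
        return 0;
    }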
@@ -1181,6 +1260,9 @@ static int r300_startup(struct radeon_device *rdev)
 {
         int r;
 
+        /* set common regs */
+        r100_set_common_regs(rdev);
+        /* program mc */
         r300_mc_program(rdev);
         /* Resume clock */
         r300_clock_startup(rdev);
@@ -1193,14 +1275,20 @@ static int r300_startup(struct radeon_device *rdev)
                 if (r)
                         return r;
         }
+
+        if (rdev->family == CHIP_R300 ||
+            rdev->family == CHIP_R350 ||
+            rdev->family == CHIP_RV350)
+                r100_enable_bm(rdev);
+
         if (rdev->flags & RADEON_IS_PCI) {
                 r = r100_pci_gart_enable(rdev);
                 if (r)
                         return r;
         }
         /* Enable IRQ */
-        rdev->irq.sw_int = true;
         r100_irq_set(rdev);
+        rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL);
         /* 1M ring buffer */
         r = r100_cp_init(rdev, 1024 * 1024);
         if (r) {
@@ -1237,6 +1325,8 @@ int r300_resume(struct radeon_device *rdev)
         radeon_combios_asic_init(rdev->ddev);
         /* Resume clock after posting */
         r300_clock_startup(rdev);
+        /* Initialize surface registers */
+        radeon_surface_init(rdev);
         return r300_startup(rdev);
 }
 
@@ -1254,7 +1344,7 @@ int r300_suspend(struct radeon_device *rdev)
 
 void r300_fini(struct radeon_device *rdev)
 {
-        r300_suspend(rdev);
+        radeon_pm_fini(rdev);
         r100_cp_fini(rdev);
         r100_wb_fini(rdev);
         r100_ib_fini(rdev);
@@ -1263,9 +1353,10 @@ void r300_fini(struct radeon_device *rdev)
                 rv370_pcie_gart_fini(rdev);
         if (rdev->flags & RADEON_IS_PCI)
                 r100_pci_gart_fini(rdev);
+        radeon_agp_fini(rdev);
         radeon_irq_kms_fini(rdev);
         radeon_fence_driver_fini(rdev);
-        radeon_object_fini(rdev);
+        radeon_bo_fini(rdev);
         radeon_atombios_fini(rdev);
         kfree(rdev->bios);
         rdev->bios = NULL;
@@ -1303,20 +1394,23 @@ int r300_init(struct radeon_device *rdev)
                         RREG32(R_0007C0_CP_STAT));
         }
         /* check if cards are posted or not */
-        if (!radeon_card_posted(rdev) && rdev->bios) {
-                DRM_INFO("GPU not posted. posting now...\n");
-                radeon_combios_asic_init(rdev->ddev);
-        }
+        if (radeon_boot_test_post_card(rdev) == false)
+                return -EINVAL;
         /* Set asic errata */
         r300_errata(rdev);
         /* Initialize clocks */
         radeon_get_clock_info(rdev->ddev);
-        /* Get vram informations */
-        r300_vram_info(rdev);
-        /* Initialize memory controller (also test AGP) */
-        r = r420_mc_init(rdev);
-        if (r)
-                return r;
+        /* Initialize power management */
+        radeon_pm_init(rdev);
+        /* initialize AGP */
+        if (rdev->flags & RADEON_IS_AGP) {
+                r = radeon_agp_init(rdev);
+                if (r) {
+                        radeon_agp_disable(rdev);
+                }
+        }
+        /* initialize memory controller */
+        r300_mc_init(rdev);
         /* Fence driver */
         r = radeon_fence_driver_init(rdev);
         if (r)
@@ -1325,7 +1419,7 @@ int r300_init(struct radeon_device *rdev)
         if (r)
                 return r;
         /* Memory manager */
-        r = radeon_object_init(rdev);
+        r = radeon_bo_init(rdev);
         if (r)
                 return r;
         if (rdev->flags & RADEON_IS_PCIE) {
@@ -1344,15 +1438,15 @@ int r300_init(struct radeon_device *rdev)
         if (r) {
                 /* Somethings want wront with the accel init stop accel */
                 dev_err(rdev->dev, "Disabling GPU acceleration\n");
-                r300_suspend(rdev);
                 r100_cp_fini(rdev);
                 r100_wb_fini(rdev);
                 r100_ib_fini(rdev);
+                radeon_irq_kms_fini(rdev);
                 if (rdev->flags & RADEON_IS_PCIE)
                         rv370_pcie_gart_fini(rdev);
                 if (rdev->flags & RADEON_IS_PCI)
                         r100_pci_gart_fini(rdev);
-                radeon_irq_kms_fini(rdev);
+                radeon_agp_fini(rdev);
                 rdev->accel_working = false;
         }
         return 0;