Diffstat (limited to 'drivers/gpu/drm/radeon/r100.c')
-rw-r--r--  drivers/gpu/drm/radeon/r100.c  287
1 file changed, 170 insertions(+), 117 deletions(-)
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
index e59422320bb6..f2204cb1ccdf 100644
--- a/drivers/gpu/drm/radeon/r100.c
+++ b/drivers/gpu/drm/radeon/r100.c
@@ -68,6 +68,39 @@ MODULE_FIRMWARE(FIRMWARE_R520);
  * r100,rv100,rs100,rv200,rs200,r200,rv250,rs300,rv280
  */
 
+void r100_pre_page_flip(struct radeon_device *rdev, int crtc)
+{
+        /* enable the pflip int */
+        radeon_irq_kms_pflip_irq_get(rdev, crtc);
+}
+
+void r100_post_page_flip(struct radeon_device *rdev, int crtc)
+{
+        /* disable the pflip int */
+        radeon_irq_kms_pflip_irq_put(rdev, crtc);
+}
+
+u32 r100_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base)
+{
+        struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id];
+        u32 tmp = ((u32)crtc_base) | RADEON_CRTC_OFFSET__OFFSET_LOCK;
+
+        /* Lock the graphics update lock */
+        /* update the scanout addresses */
+        WREG32(RADEON_CRTC_OFFSET + radeon_crtc->crtc_offset, tmp);
+
+        /* Wait for update_pending to go high. */
+        while (!(RREG32(RADEON_CRTC_OFFSET + radeon_crtc->crtc_offset) & RADEON_CRTC_OFFSET__GUI_TRIG_OFFSET));
+        DRM_DEBUG("Update pending now high. Unlocking vupdate_lock.\n");
+
+        /* Unlock the lock, so double-buffering can take place inside vblank */
+        tmp &= ~RADEON_CRTC_OFFSET__OFFSET_LOCK;
+        WREG32(RADEON_CRTC_OFFSET + radeon_crtc->crtc_offset, tmp);
+
+        /* Return current update_pending status: */
+        return RREG32(RADEON_CRTC_OFFSET + radeon_crtc->crtc_offset) & RADEON_CRTC_OFFSET__GUI_TRIG_OFFSET;
+}
+
 void r100_pm_get_dynpm_state(struct radeon_device *rdev)
 {
         int i;
@@ -442,7 +475,7 @@ int r100_pci_gart_init(struct radeon_device *rdev)
         int r;
 
         if (rdev->gart.table.ram.ptr) {
-                WARN(1, "R100 PCI GART already initialized.\n");
+                WARN(1, "R100 PCI GART already initialized\n");
                 return 0;
         }
         /* Initialize common gart structure */
@@ -516,7 +549,7 @@ int r100_irq_set(struct radeon_device *rdev)
         uint32_t tmp = 0;
 
         if (!rdev->irq.installed) {
-                WARN(1, "Can't enable IRQ/MSI because no handler is installed.\n");
+                WARN(1, "Can't enable IRQ/MSI because no handler is installed\n");
                 WREG32(R_000040_GEN_INT_CNTL, 0);
                 return -EINVAL;
         }
@@ -526,10 +559,12 @@ int r100_irq_set(struct radeon_device *rdev)
         if (rdev->irq.gui_idle) {
                 tmp |= RADEON_GUI_IDLE_MASK;
         }
-        if (rdev->irq.crtc_vblank_int[0]) {
+        if (rdev->irq.crtc_vblank_int[0] ||
+            rdev->irq.pflip[0]) {
                 tmp |= RADEON_CRTC_VBLANK_MASK;
         }
-        if (rdev->irq.crtc_vblank_int[1]) {
+        if (rdev->irq.crtc_vblank_int[1] ||
+            rdev->irq.pflip[1]) {
                 tmp |= RADEON_CRTC2_VBLANK_MASK;
         }
         if (rdev->irq.hpd[0]) {
@@ -600,14 +635,22 @@ int r100_irq_process(struct radeon_device *rdev)
                 }
                 /* Vertical blank interrupts */
                 if (status & RADEON_CRTC_VBLANK_STAT) {
-                        drm_handle_vblank(rdev->ddev, 0);
-                        rdev->pm.vblank_sync = true;
-                        wake_up(&rdev->irq.vblank_queue);
+                        if (rdev->irq.crtc_vblank_int[0]) {
+                                drm_handle_vblank(rdev->ddev, 0);
+                                rdev->pm.vblank_sync = true;
+                                wake_up(&rdev->irq.vblank_queue);
+                        }
+                        if (rdev->irq.pflip[0])
+                                radeon_crtc_handle_flip(rdev, 0);
                 }
                 if (status & RADEON_CRTC2_VBLANK_STAT) {
-                        drm_handle_vblank(rdev->ddev, 1);
-                        rdev->pm.vblank_sync = true;
-                        wake_up(&rdev->irq.vblank_queue);
+                        if (rdev->irq.crtc_vblank_int[1]) {
+                                drm_handle_vblank(rdev->ddev, 1);
+                                rdev->pm.vblank_sync = true;
+                                wake_up(&rdev->irq.vblank_queue);
+                        }
+                        if (rdev->irq.pflip[1])
+                                radeon_crtc_handle_flip(rdev, 1);
                 }
                 if (status & RADEON_FP_DETECT_STAT) {
                         queue_hotplug = true;
@@ -622,7 +665,7 @@ int r100_irq_process(struct radeon_device *rdev)
         /* reset gui idle ack. the status bit is broken */
         rdev->irq.gui_idle_acked = false;
         if (queue_hotplug)
-                queue_work(rdev->wq, &rdev->hotplug_work);
+                schedule_work(&rdev->hotplug_work);
         if (rdev->msi_enabled) {
                 switch (rdev->family) {
                 case CHIP_RS400:
@@ -675,67 +718,6 @@ void r100_fence_ring_emit(struct radeon_device *rdev,
         radeon_ring_write(rdev, RADEON_SW_INT_FIRE);
 }
 
-int r100_wb_init(struct radeon_device *rdev)
-{
-        int r;
-
-        if (rdev->wb.wb_obj == NULL) {
-                r = radeon_bo_create(rdev, NULL, RADEON_GPU_PAGE_SIZE, true,
-                                     RADEON_GEM_DOMAIN_GTT,
-                                     &rdev->wb.wb_obj);
-                if (r) {
-                        dev_err(rdev->dev, "(%d) create WB buffer failed\n", r);
-                        return r;
-                }
-                r = radeon_bo_reserve(rdev->wb.wb_obj, false);
-                if (unlikely(r != 0))
-                        return r;
-                r = radeon_bo_pin(rdev->wb.wb_obj, RADEON_GEM_DOMAIN_GTT,
-                                  &rdev->wb.gpu_addr);
-                if (r) {
-                        dev_err(rdev->dev, "(%d) pin WB buffer failed\n", r);
-                        radeon_bo_unreserve(rdev->wb.wb_obj);
-                        return r;
-                }
-                r = radeon_bo_kmap(rdev->wb.wb_obj, (void **)&rdev->wb.wb);
-                radeon_bo_unreserve(rdev->wb.wb_obj);
-                if (r) {
-                        dev_err(rdev->dev, "(%d) map WB buffer failed\n", r);
-                        return r;
-                }
-        }
-        WREG32(R_000774_SCRATCH_ADDR, rdev->wb.gpu_addr);
-        WREG32(R_00070C_CP_RB_RPTR_ADDR,
-               S_00070C_RB_RPTR_ADDR((rdev->wb.gpu_addr + 1024) >> 2));
-        WREG32(R_000770_SCRATCH_UMSK, 0xff);
-        return 0;
-}
-
-void r100_wb_disable(struct radeon_device *rdev)
-{
-        WREG32(R_000770_SCRATCH_UMSK, 0);
-}
-
-void r100_wb_fini(struct radeon_device *rdev)
-{
-        int r;
-
-        r100_wb_disable(rdev);
-        if (rdev->wb.wb_obj) {
-                r = radeon_bo_reserve(rdev->wb.wb_obj, false);
-                if (unlikely(r != 0)) {
-                        dev_err(rdev->dev, "(%d) can't finish WB\n", r);
-                        return;
-                }
-                radeon_bo_kunmap(rdev->wb.wb_obj);
-                radeon_bo_unpin(rdev->wb.wb_obj);
-                radeon_bo_unreserve(rdev->wb.wb_obj);
-                radeon_bo_unref(&rdev->wb.wb_obj);
-                rdev->wb.wb = NULL;
-                rdev->wb.wb_obj = NULL;
-        }
-}
-
 int r100_copy_blit(struct radeon_device *rdev,
                    uint64_t src_offset,
                    uint64_t dst_offset,
@@ -996,20 +978,32 @@ int r100_cp_init(struct radeon_device *rdev, unsigned ring_size)
         WREG32(0x718, pre_write_timer | (pre_write_limit << 28));
         tmp = (REG_SET(RADEON_RB_BUFSZ, rb_bufsz) |
                REG_SET(RADEON_RB_BLKSZ, rb_blksz) |
-               REG_SET(RADEON_MAX_FETCH, max_fetch) |
-               RADEON_RB_NO_UPDATE);
+               REG_SET(RADEON_MAX_FETCH, max_fetch));
 #ifdef __BIG_ENDIAN
         tmp |= RADEON_BUF_SWAP_32BIT;
 #endif
-        WREG32(RADEON_CP_RB_CNTL, tmp);
+        WREG32(RADEON_CP_RB_CNTL, tmp | RADEON_RB_NO_UPDATE);
 
         /* Set ring address */
         DRM_INFO("radeon: ring at 0x%016lX\n", (unsigned long)rdev->cp.gpu_addr);
         WREG32(RADEON_CP_RB_BASE, rdev->cp.gpu_addr);
         /* Force read & write ptr to 0 */
-        WREG32(RADEON_CP_RB_CNTL, tmp | RADEON_RB_RPTR_WR_ENA);
+        WREG32(RADEON_CP_RB_CNTL, tmp | RADEON_RB_RPTR_WR_ENA | RADEON_RB_NO_UPDATE);
         WREG32(RADEON_CP_RB_RPTR_WR, 0);
         WREG32(RADEON_CP_RB_WPTR, 0);
+
+        /* set the wb address whether it's enabled or not */
+        WREG32(R_00070C_CP_RB_RPTR_ADDR,
+               S_00070C_RB_RPTR_ADDR((rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) >> 2));
+        WREG32(R_000774_SCRATCH_ADDR, rdev->wb.gpu_addr + RADEON_WB_SCRATCH_OFFSET);
+
+        if (rdev->wb.enabled)
+                WREG32(R_000770_SCRATCH_UMSK, 0xff);
+        else {
+                tmp |= RADEON_RB_NO_UPDATE;
+                WREG32(R_000770_SCRATCH_UMSK, 0);
+        }
+
         WREG32(RADEON_CP_RB_CNTL, tmp);
         udelay(10);
         rdev->cp.rptr = RREG32(RADEON_CP_RB_RPTR);
@@ -1020,8 +1014,8 @@ int r100_cp_init(struct radeon_device *rdev, unsigned ring_size)
         WREG32(RADEON_CP_CSQ_MODE,
                REG_SET(RADEON_INDIRECT2_START, indirect2_start) |
                REG_SET(RADEON_INDIRECT1_START, indirect1_start));
-        WREG32(0x718, 0);
-        WREG32(0x744, 0x00004D4D);
+        WREG32(RADEON_CP_RB_WPTR_DELAY, 0);
+        WREG32(RADEON_CP_CSQ_MODE, 0x00004D4D);
         WREG32(RADEON_CP_CSQ_CNTL, RADEON_CSQ_PRIBM_INDBM);
         radeon_ring_start(rdev);
         r = radeon_ring_test(rdev);
@@ -1030,7 +1024,7 @@ int r100_cp_init(struct radeon_device *rdev, unsigned ring_size)
                 return r;
         }
         rdev->cp.ready = true;
-        rdev->mc.active_vram_size = rdev->mc.real_vram_size;
+        radeon_ttm_set_active_vram_size(rdev, rdev->mc.real_vram_size);
         return 0;
 }
 
@@ -1048,10 +1042,11 @@ void r100_cp_fini(struct radeon_device *rdev)
 void r100_cp_disable(struct radeon_device *rdev)
 {
         /* Disable ring */
-        rdev->mc.active_vram_size = rdev->mc.visible_vram_size;
+        radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
         rdev->cp.ready = false;
         WREG32(RADEON_CP_CSQ_MODE, 0);
         WREG32(RADEON_CP_CSQ_CNTL, 0);
+        WREG32(R_000770_SCRATCH_UMSK, 0);
         if (r100_gui_wait_for_idle(rdev)) {
                 printk(KERN_WARNING "Failed to wait GUI idle while "
                        "programming pipes. Bad things might happen.\n");
@@ -1210,14 +1205,12 @@ int r100_cs_packet_parse_vline(struct radeon_cs_parser *p)
         if (waitreloc.reg != RADEON_WAIT_UNTIL ||
             waitreloc.count != 0) {
                 DRM_ERROR("vline wait had illegal wait until segment\n");
-                r = -EINVAL;
-                return r;
+                return -EINVAL;
         }
 
         if (radeon_get_ib_value(p, waitreloc.idx + 1) != RADEON_WAIT_CRTC_VLINE) {
                 DRM_ERROR("vline wait had illegal wait until\n");
-                r = -EINVAL;
-                return r;
+                return -EINVAL;
         }
 
         /* jump over the NOP */
@@ -1235,8 +1228,7 @@ int r100_cs_packet_parse_vline(struct radeon_cs_parser *p)
         obj = drm_mode_object_find(p->rdev->ddev, crtc_id, DRM_MODE_OBJECT_CRTC);
         if (!obj) {
                 DRM_ERROR("cannot find crtc %d\n", crtc_id);
-                r = -EINVAL;
-                goto out;
+                return -EINVAL;
         }
         crtc = obj_to_crtc(obj);
         radeon_crtc = to_radeon_crtc(crtc);
@@ -1258,14 +1250,13 @@ int r100_cs_packet_parse_vline(struct radeon_cs_parser *p)
                         break;
                 default:
                         DRM_ERROR("unknown crtc reloc\n");
-                        r = -EINVAL;
-                        goto out;
+                        return -EINVAL;
                 }
                 ib[h_idx] = header;
                 ib[h_idx + 3] |= RADEON_ENG_DISPLAY_SELECT_CRTC1;
         }
-out:
-        return r;
+
+        return 0;
 }
 
 /**
@@ -1415,6 +1406,7 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
                 }
                 track->zb.robj = reloc->robj;
                 track->zb.offset = idx_value;
+                track->zb_dirty = true;
                 ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
                 break;
         case RADEON_RB3D_COLOROFFSET:
@@ -1427,6 +1419,7 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
                 }
                 track->cb[0].robj = reloc->robj;
                 track->cb[0].offset = idx_value;
+                track->cb_dirty = true;
                 ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
                 break;
         case RADEON_PP_TXOFFSET_0:
@@ -1442,6 +1435,7 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
                 }
                 ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
                 track->textures[i].robj = reloc->robj;
+                track->tex_dirty = true;
                 break;
         case RADEON_PP_CUBIC_OFFSET_T0_0:
         case RADEON_PP_CUBIC_OFFSET_T0_1:
@@ -1459,6 +1453,7 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
                 track->textures[0].cube_info[i].offset = idx_value;
                 ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
                 track->textures[0].cube_info[i].robj = reloc->robj;
+                track->tex_dirty = true;
                 break;
         case RADEON_PP_CUBIC_OFFSET_T1_0:
         case RADEON_PP_CUBIC_OFFSET_T1_1:
@@ -1476,6 +1471,7 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
                 track->textures[1].cube_info[i].offset = idx_value;
                 ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
                 track->textures[1].cube_info[i].robj = reloc->robj;
+                track->tex_dirty = true;
                 break;
         case RADEON_PP_CUBIC_OFFSET_T2_0:
         case RADEON_PP_CUBIC_OFFSET_T2_1:
@@ -1493,9 +1489,12 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
                 track->textures[2].cube_info[i].offset = idx_value;
                 ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
                 track->textures[2].cube_info[i].robj = reloc->robj;
+                track->tex_dirty = true;
                 break;
         case RADEON_RE_WIDTH_HEIGHT:
                 track->maxy = ((idx_value >> 16) & 0x7FF);
+                track->cb_dirty = true;
+                track->zb_dirty = true;
                 break;
         case RADEON_RB3D_COLORPITCH:
                 r = r100_cs_packet_next_reloc(p, &reloc);
@@ -1516,9 +1515,11 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
                 ib[idx] = tmp;
 
                 track->cb[0].pitch = idx_value & RADEON_COLORPITCH_MASK;
+                track->cb_dirty = true;
                 break;
         case RADEON_RB3D_DEPTHPITCH:
                 track->zb.pitch = idx_value & RADEON_DEPTHPITCH_MASK;
+                track->zb_dirty = true;
                 break;
         case RADEON_RB3D_CNTL:
                 switch ((idx_value >> RADEON_RB3D_COLOR_FORMAT_SHIFT) & 0x1f) {
@@ -1543,6 +1544,8 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
                         return -EINVAL;
                 }
                 track->z_enabled = !!(idx_value & RADEON_Z_ENABLE);
+                track->cb_dirty = true;
+                track->zb_dirty = true;
                 break;
         case RADEON_RB3D_ZSTENCILCNTL:
                 switch (idx_value & 0xf) {
@@ -1560,6 +1563,7 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
                 default:
                         break;
                 }
+                track->zb_dirty = true;
                 break;
         case RADEON_RB3D_ZPASS_ADDR:
                 r = r100_cs_packet_next_reloc(p, &reloc);
@@ -1576,6 +1580,7 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
                 uint32_t temp = idx_value >> 4;
                 for (i = 0; i < track->num_texture; i++)
                         track->textures[i].enabled = !!(temp & (1 << i));
+                track->tex_dirty = true;
         }
                 break;
         case RADEON_SE_VF_CNTL:
@@ -1590,12 +1595,14 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
                 i = (reg - RADEON_PP_TEX_SIZE_0) / 8;
                 track->textures[i].width = (idx_value & RADEON_TEX_USIZE_MASK) + 1;
                 track->textures[i].height = ((idx_value & RADEON_TEX_VSIZE_MASK) >> RADEON_TEX_VSIZE_SHIFT) + 1;
+                track->tex_dirty = true;
                 break;
         case RADEON_PP_TEX_PITCH_0:
         case RADEON_PP_TEX_PITCH_1:
         case RADEON_PP_TEX_PITCH_2:
                 i = (reg - RADEON_PP_TEX_PITCH_0) / 8;
                 track->textures[i].pitch = idx_value + 32;
+                track->tex_dirty = true;
                 break;
         case RADEON_PP_TXFILTER_0:
         case RADEON_PP_TXFILTER_1:
@@ -1609,6 +1616,7 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
                 tmp = (idx_value >> 27) & 0x7;
                 if (tmp == 2 || tmp == 6)
                         track->textures[i].roundup_h = false;
+                track->tex_dirty = true;
                 break;
         case RADEON_PP_TXFORMAT_0:
         case RADEON_PP_TXFORMAT_1:
@@ -1661,6 +1669,7 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
                 }
                 track->textures[i].cube_info[4].width = 1 << ((idx_value >> 16) & 0xf);
                 track->textures[i].cube_info[4].height = 1 << ((idx_value >> 20) & 0xf);
+                track->tex_dirty = true;
                 break;
         case RADEON_PP_CUBIC_FACES_0:
         case RADEON_PP_CUBIC_FACES_1:
@@ -1671,6 +1680,7 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
                         track->textures[i].cube_info[face].width = 1 << ((tmp >> (face * 8)) & 0xf);
                         track->textures[i].cube_info[face].height = 1 << ((tmp >> ((face * 8) + 4)) & 0xf);
                 }
+                track->tex_dirty = true;
                 break;
         default:
                 printk(KERN_ERR "Forbidden register 0x%04X in cs at %d\n",
@@ -2074,12 +2084,13 @@ int r100_asic_reset(struct radeon_device *rdev)
 {
         struct r100_mc_save save;
         u32 status, tmp;
+        int ret = 0;
 
-        r100_mc_stop(rdev, &save);
         status = RREG32(R_000E40_RBBM_STATUS);
         if (!G_000E40_GUI_ACTIVE(status)) {
                 return 0;
         }
+        r100_mc_stop(rdev, &save);
         status = RREG32(R_000E40_RBBM_STATUS);
         dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status);
         /* stop CP */
@@ -2119,11 +2130,11 @@ int r100_asic_reset(struct radeon_device *rdev)
             G_000E40_TAM_BUSY(status) || G_000E40_PB_BUSY(status)) {
                 dev_err(rdev->dev, "failed to reset GPU\n");
                 rdev->gpu_lockup = true;
-                return -1;
-        }
+                ret = -1;
+        } else
+                dev_info(rdev->dev, "GPU reset succeed\n");
         r100_mc_resume(rdev, &save);
-        dev_info(rdev->dev, "GPU reset succeed\n");
-        return 0;
+        return ret;
 }
 
 void r100_set_common_regs(struct radeon_device *rdev)
@@ -2297,7 +2308,6 @@ void r100_vram_init_sizes(struct radeon_device *rdev)
         /* FIXME we don't use the second aperture yet when we could use it */
         if (rdev->mc.visible_vram_size > rdev->mc.aper_size)
                 rdev->mc.visible_vram_size = rdev->mc.aper_size;
-        rdev->mc.active_vram_size = rdev->mc.visible_vram_size;
         config_aper_size = RREG32(RADEON_CONFIG_APER_SIZE);
         if (rdev->flags & RADEON_IS_IGP) {
                 uint32_t tom;
@@ -2318,6 +2328,9 @@ void r100_vram_init_sizes(struct radeon_device *rdev)
                 /* Fix for RN50, M6, M7 with 8/16/32(??) MBs of VRAM -
                  * Novell bug 204882 + along with lots of ubuntu ones
                  */
+                if (rdev->mc.aper_size > config_aper_size)
+                        config_aper_size = rdev->mc.aper_size;
+
                 if (config_aper_size > rdev->mc.real_vram_size)
                         rdev->mc.mc_vram_size = config_aper_size;
                 else
@@ -2331,10 +2344,10 @@ void r100_vga_set_state(struct radeon_device *rdev, bool state)
 
         temp = RREG32(RADEON_CONFIG_CNTL);
         if (state == false) {
-                temp &= ~(1<<8);
-                temp |= (1<<9);
+                temp &= ~RADEON_CFG_VGA_RAM_EN;
+                temp |= RADEON_CFG_VGA_IO_DIS;
         } else {
-                temp &= ~(1<<9);
+                temp &= ~RADEON_CFG_VGA_IO_DIS;
         }
         WREG32(RADEON_CONFIG_CNTL, temp);
 }
@@ -3225,6 +3238,8 @@ static int r100_cs_track_texture_check(struct radeon_device *rdev,
         for (u = 0; u < track->num_texture; u++) {
                 if (!track->textures[u].enabled)
                         continue;
+                if (track->textures[u].lookup_disable)
+                        continue;
                 robj = track->textures[u].robj;
                 if (robj == NULL) {
                         DRM_ERROR("No texture bound to unit %u\n", u);
@@ -3300,9 +3315,9 @@ int r100_cs_track_check(struct radeon_device *rdev, struct r100_cs_track *track)
         unsigned long size;
         unsigned prim_walk;
         unsigned nverts;
-        unsigned num_cb = track->num_cb;
+        unsigned num_cb = track->cb_dirty ? track->num_cb : 0;
 
-        if (!track->zb_cb_clear && !track->color_channel_mask &&
+        if (num_cb && !track->zb_cb_clear && !track->color_channel_mask &&
             !track->blend_read_enable)
                 num_cb = 0;
 
@@ -3323,7 +3338,9 @@ int r100_cs_track_check(struct radeon_device *rdev, struct r100_cs_track *track)
                         return -EINVAL;
                 }
         }
-        if (track->z_enabled) {
+        track->cb_dirty = false;
+
+        if (track->zb_dirty && track->z_enabled) {
                 if (track->zb.robj == NULL) {
                         DRM_ERROR("[drm] No buffer for z buffer !\n");
                         return -EINVAL;
@@ -3340,6 +3357,28 @@ int r100_cs_track_check(struct radeon_device *rdev, struct r100_cs_track *track)
                         return -EINVAL;
                 }
         }
+        track->zb_dirty = false;
+
+        if (track->aa_dirty && track->aaresolve) {
+                if (track->aa.robj == NULL) {
+                        DRM_ERROR("[drm] No buffer for AA resolve buffer %d !\n", i);
+                        return -EINVAL;
+                }
+                /* I believe the format comes from colorbuffer0. */
+                size = track->aa.pitch * track->cb[0].cpp * track->maxy;
+                size += track->aa.offset;
+                if (size > radeon_bo_size(track->aa.robj)) {
+                        DRM_ERROR("[drm] Buffer too small for AA resolve buffer %d "
+                                  "(need %lu have %lu) !\n", i, size,
+                                  radeon_bo_size(track->aa.robj));
+                        DRM_ERROR("[drm] AA resolve buffer %d (%u %u %u %u)\n",
+                                  i, track->aa.pitch, track->cb[0].cpp,
+                                  track->aa.offset, track->maxy);
+                        return -EINVAL;
+                }
+        }
+        track->aa_dirty = false;
+
         prim_walk = (track->vap_vf_cntl >> 4) & 0x3;
         if (track->vap_vf_cntl & (1 << 14)) {
                 nverts = track->vap_alt_nverts;
@@ -3399,13 +3438,23 @@ int r100_cs_track_check(struct radeon_device *rdev, struct r100_cs_track *track)
                           prim_walk);
                 return -EINVAL;
         }
-        return r100_cs_track_texture_check(rdev, track);
+
+        if (track->tex_dirty) {
+                track->tex_dirty = false;
+                return r100_cs_track_texture_check(rdev, track);
+        }
+        return 0;
 }
 
 void r100_cs_track_clear(struct radeon_device *rdev, struct r100_cs_track *track)
 {
         unsigned i, face;
 
+        track->cb_dirty = true;
+        track->zb_dirty = true;
+        track->tex_dirty = true;
+        track->aa_dirty = true;
+
         if (rdev->family < CHIP_R300) {
                 track->num_cb = 1;
                 if (rdev->family <= CHIP_RS200)
@@ -3419,6 +3468,8 @@ void r100_cs_track_clear(struct radeon_device *rdev, struct r100_cs_track *track
                 track->num_texture = 16;
                 track->maxy = 4096;
                 track->separate_cube = 0;
+                track->aaresolve = false;
+                track->aa.robj = NULL;
         }
 
         for (i = 0; i < track->num_cb; i++) {
@@ -3459,6 +3510,7 @@ void r100_cs_track_clear(struct radeon_device *rdev, struct r100_cs_track *track
                 track->textures[i].robj = NULL;
                 /* CS IB emission code makes sure texture unit are disabled */
                 track->textures[i].enabled = false;
+                track->textures[i].lookup_disable = false;
                 track->textures[i].roundup_w = true;
                 track->textures[i].roundup_h = true;
                 if (track->separate_cube)
@@ -3503,7 +3555,7 @@ int r100_ring_test(struct radeon_device *rdev)
         if (i < rdev->usec_timeout) {
                 DRM_INFO("ring test succeeded in %d usecs\n", i);
         } else {
-                DRM_ERROR("radeon: ring test failed (sracth(0x%04X)=0x%08X)\n",
+                DRM_ERROR("radeon: ring test failed (scratch(0x%04X)=0x%08X)\n",
                           scratch, tmp);
                 r = -EINVAL;
         }
@@ -3565,7 +3617,7 @@ int r100_ib_test(struct radeon_device *rdev)
         if (i < rdev->usec_timeout) {
                 DRM_INFO("ib test succeeded in %u usecs\n", i);
         } else {
-                DRM_ERROR("radeon: ib test failed (sracth(0x%04X)=0x%08X)\n",
+                DRM_ERROR("radeon: ib test failed (scratch(0x%04X)=0x%08X)\n",
                           scratch, tmp);
                 r = -EINVAL;
         }
@@ -3585,13 +3637,13 @@ int r100_ib_init(struct radeon_device *rdev)
 
         r = radeon_ib_pool_init(rdev);
         if (r) {
-                dev_err(rdev->dev, "failled initializing IB pool (%d).\n", r);
+                dev_err(rdev->dev, "failed initializing IB pool (%d).\n", r);
                 r100_ib_fini(rdev);
                 return r;
         }
         r = r100_ib_test(rdev);
         if (r) {
-                dev_err(rdev->dev, "failled testing IB (%d).\n", r);
+                dev_err(rdev->dev, "failed testing IB (%d).\n", r);
                 r100_ib_fini(rdev);
                 return r;
         }
@@ -3727,8 +3779,6 @@ static int r100_startup(struct radeon_device *rdev)
         r100_mc_program(rdev);
         /* Resume clock */
         r100_clock_startup(rdev);
-        /* Initialize GPU configuration (# pipes, ...) */
-//      r100_gpu_init(rdev);
         /* Initialize GART (initialize after TTM so we can allocate
          * memory through TTM but finalize after TTM) */
         r100_enable_bm(rdev);
@@ -3737,21 +3787,24 @@ static int r100_startup(struct radeon_device *rdev)
                 if (r)
                         return r;
         }
+
+        /* allocate wb buffer */
+        r = radeon_wb_init(rdev);
+        if (r)
+                return r;
+
         /* Enable IRQ */
         r100_irq_set(rdev);
         rdev->config.r100.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL);
         /* 1M ring buffer */
         r = r100_cp_init(rdev, 1024 * 1024);
         if (r) {
-                dev_err(rdev->dev, "failled initializing CP (%d).\n", r);
+                dev_err(rdev->dev, "failed initializing CP (%d).\n", r);
                 return r;
         }
-        r = r100_wb_init(rdev);
-        if (r)
-                dev_err(rdev->dev, "failled initializing WB (%d).\n", r);
         r = r100_ib_init(rdev);
         if (r) {
-                dev_err(rdev->dev, "failled initializing IB (%d).\n", r);
+                dev_err(rdev->dev, "failed initializing IB (%d).\n", r);
                 return r;
         }
         return 0;
@@ -3782,7 +3835,7 @@ int r100_resume(struct radeon_device *rdev)
 int r100_suspend(struct radeon_device *rdev)
 {
         r100_cp_disable(rdev);
-        r100_wb_disable(rdev);
+        radeon_wb_disable(rdev);
         r100_irq_disable(rdev);
         if (rdev->flags & RADEON_IS_PCI)
                 r100_pci_gart_disable(rdev);
@@ -3792,7 +3845,7 @@ int r100_suspend(struct radeon_device *rdev)
 void r100_fini(struct radeon_device *rdev)
 {
         r100_cp_fini(rdev);
-        r100_wb_fini(rdev);
+        radeon_wb_fini(rdev);
         r100_ib_fini(rdev);
         radeon_gem_fini(rdev);
         if (rdev->flags & RADEON_IS_PCI)
@@ -3905,7 +3958,7 @@ int r100_init(struct radeon_device *rdev)
                 /* Somethings want wront with the accel init stop accel */
                 dev_err(rdev->dev, "Disabling GPU acceleration\n");
                 r100_cp_fini(rdev);
-                r100_wb_fini(rdev);
+                radeon_wb_fini(rdev);
                 r100_ib_fini(rdev);
                 radeon_irq_kms_fini(rdev);
                 if (rdev->flags & RADEON_IS_PCI)