Diffstat (limited to 'drivers/gpu/drm/radeon/rs600.c')
 -rw-r--r--  drivers/gpu/drm/radeon/rs600.c | 155
 1 files changed, 102 insertions, 53 deletions
diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c
index 51d5f7b5ab21..1f5850e473cc 100644
--- a/drivers/gpu/drm/radeon/rs600.c
+++ b/drivers/gpu/drm/radeon/rs600.c
@@ -46,6 +46,45 @@
 void rs600_gpu_init(struct radeon_device *rdev);
 int rs600_mc_wait_for_idle(struct radeon_device *rdev);
 
+void rs600_pre_page_flip(struct radeon_device *rdev, int crtc)
+{
+	/* enable the pflip int */
+	radeon_irq_kms_pflip_irq_get(rdev, crtc);
+}
+
+void rs600_post_page_flip(struct radeon_device *rdev, int crtc)
+{
+	/* disable the pflip int */
+	radeon_irq_kms_pflip_irq_put(rdev, crtc);
+}
+
+u32 rs600_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base)
+{
+	struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id];
+	u32 tmp = RREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset);
+
+	/* Lock the graphics update lock */
+	tmp |= AVIVO_D1GRPH_UPDATE_LOCK;
+	WREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset, tmp);
+
+	/* update the scanout addresses */
+	WREG32(AVIVO_D1GRPH_SECONDARY_SURFACE_ADDRESS + radeon_crtc->crtc_offset,
+	       (u32)crtc_base);
+	WREG32(AVIVO_D1GRPH_PRIMARY_SURFACE_ADDRESS + radeon_crtc->crtc_offset,
+	       (u32)crtc_base);
+
+	/* Wait for update_pending to go high. */
+	while (!(RREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset) & AVIVO_D1GRPH_SURFACE_UPDATE_PENDING));
+	DRM_DEBUG("Update pending now high. Unlocking vupdate_lock.\n");
+
+	/* Unlock the lock, so double-buffering can take place inside vblank */
+	tmp &= ~AVIVO_D1GRPH_UPDATE_LOCK;
+	WREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset, tmp);
+
+	/* Return current update_pending status: */
+	return RREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset) & AVIVO_D1GRPH_SURFACE_UPDATE_PENDING;
+}
+
 void rs600_pm_misc(struct radeon_device *rdev)
 {
 	int requested_index = rdev->pm.requested_power_state_index;
@@ -75,7 +114,7 @@ void rs600_pm_misc(struct radeon_device *rdev)
 			udelay(voltage->delay);
 		}
 	} else if (voltage->type == VOLTAGE_VDDC)
-		radeon_atom_set_voltage(rdev, voltage->vddc_id);
+		radeon_atom_set_voltage(rdev, voltage->vddc_id, SET_VOLTAGE_TYPE_ASIC_VDDC);
 
 	dyn_pwrmgt_sclk_length = RREG32_PLL(DYN_PWRMGT_SCLK_LENGTH);
 	dyn_pwrmgt_sclk_length &= ~REDUCED_POWER_SCLK_HILEN(0xf);
@@ -289,16 +328,16 @@ void rs600_bm_disable(struct radeon_device *rdev)
 
 int rs600_asic_reset(struct radeon_device *rdev)
 {
-	u32 status, tmp;
-
 	struct rv515_mc_save save;
+	u32 status, tmp;
+	int ret = 0;
 
-	/* Stops all mc clients */
-	rv515_mc_stop(rdev, &save);
 	status = RREG32(R_000E40_RBBM_STATUS);
 	if (!G_000E40_GUI_ACTIVE(status)) {
 		return 0;
 	}
+	/* Stops all mc clients */
+	rv515_mc_stop(rdev, &save);
 	status = RREG32(R_000E40_RBBM_STATUS);
 	dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status);
 	/* stop CP */
@@ -342,11 +381,11 @@ int rs600_asic_reset(struct radeon_device *rdev)
 	if (G_000E40_GA_BUSY(status) || G_000E40_VAP_BUSY(status)) {
 		dev_err(rdev->dev, "failed to reset GPU\n");
 		rdev->gpu_lockup = true;
-		return -1;
-	}
+		ret = -1;
+	} else
+		dev_info(rdev->dev, "GPU reset succeed\n");
 	rv515_mc_resume(rdev, &save);
-	dev_info(rdev->dev, "GPU reset succeed\n");
-	return 0;
+	return ret;
 }
 
 /*
@@ -375,7 +414,7 @@ int rs600_gart_init(struct radeon_device *rdev)
 	int r;
 
 	if (rdev->gart.table.vram.robj) {
-		WARN(1, "RS600 GART already initialized.\n");
+		WARN(1, "RS600 GART already initialized\n");
 		return 0;
 	}
 	/* Initialize common gart structure */
@@ -387,7 +426,7 @@ int rs600_gart_init(struct radeon_device *rdev)
 	return radeon_gart_table_vram_alloc(rdev);
 }
 
-int rs600_gart_enable(struct radeon_device *rdev)
+static int rs600_gart_enable(struct radeon_device *rdev)
 {
 	u32 tmp;
 	int r, i;
@@ -401,8 +440,8 @@ int rs600_gart_enable(struct radeon_device *rdev)
 		return r;
 	radeon_gart_restore(rdev);
 	/* Enable bus master */
-	tmp = RREG32(R_00004C_BUS_CNTL) & C_00004C_BUS_MASTER_DIS;
-	WREG32(R_00004C_BUS_CNTL, tmp);
+	tmp = RREG32(RADEON_BUS_CNTL) & ~RS600_BUS_MASTER_DIS;
+	WREG32(RADEON_BUS_CNTL, tmp);
 	/* FIXME: setup default page */
 	WREG32_MC(R_000100_MC_PT0_CNTL,
 		  (S_000100_EFFECTIVE_L2_CACHE_SIZE(6) |
@@ -505,7 +544,7 @@ int rs600_irq_set(struct radeon_device *rdev)
 		~S_007D18_DC_HOT_PLUG_DETECT2_INT_EN(1);
 
 	if (!rdev->irq.installed) {
-		WARN(1, "Can't enable IRQ/MSI because no handler is installed.\n");
+		WARN(1, "Can't enable IRQ/MSI because no handler is installed\n");
 		WREG32(R_000040_GEN_INT_CNTL, 0);
 		return -EINVAL;
 	}
@@ -515,10 +554,12 @@ int rs600_irq_set(struct radeon_device *rdev)
 	if (rdev->irq.gui_idle) {
 		tmp |= S_000040_GUI_IDLE(1);
 	}
-	if (rdev->irq.crtc_vblank_int[0]) {
+	if (rdev->irq.crtc_vblank_int[0] ||
+	    rdev->irq.pflip[0]) {
 		mode_int |= S_006540_D1MODE_VBLANK_INT_MASK(1);
 	}
-	if (rdev->irq.crtc_vblank_int[1]) {
+	if (rdev->irq.crtc_vblank_int[1] ||
+	    rdev->irq.pflip[1]) {
 		mode_int |= S_006540_D2MODE_VBLANK_INT_MASK(1);
 	}
 	if (rdev->irq.hpd[0]) {
@@ -534,7 +575,7 @@ int rs600_irq_set(struct radeon_device *rdev)
 	return 0;
 }
 
-static inline uint32_t rs600_irq_ack(struct radeon_device *rdev, u32 *r500_disp_int)
+static inline u32 rs600_irq_ack(struct radeon_device *rdev)
 {
 	uint32_t irqs = RREG32(R_000044_GEN_INT_STATUS);
 	uint32_t irq_mask = S_000044_SW_INT(1);
@@ -547,27 +588,27 @@ static inline uint32_t rs600_irq_ack(struct radeon_device *rdev, u32 *r500_disp_
 	}
 
 	if (G_000044_DISPLAY_INT_STAT(irqs)) {
-		*r500_disp_int = RREG32(R_007EDC_DISP_INTERRUPT_STATUS);
-		if (G_007EDC_LB_D1_VBLANK_INTERRUPT(*r500_disp_int)) {
+		rdev->irq.stat_regs.r500.disp_int = RREG32(R_007EDC_DISP_INTERRUPT_STATUS);
+		if (G_007EDC_LB_D1_VBLANK_INTERRUPT(rdev->irq.stat_regs.r500.disp_int)) {
 			WREG32(R_006534_D1MODE_VBLANK_STATUS,
 				S_006534_D1MODE_VBLANK_ACK(1));
 		}
-		if (G_007EDC_LB_D2_VBLANK_INTERRUPT(*r500_disp_int)) {
+		if (G_007EDC_LB_D2_VBLANK_INTERRUPT(rdev->irq.stat_regs.r500.disp_int)) {
 			WREG32(R_006D34_D2MODE_VBLANK_STATUS,
 				S_006D34_D2MODE_VBLANK_ACK(1));
 		}
-		if (G_007EDC_DC_HOT_PLUG_DETECT1_INTERRUPT(*r500_disp_int)) {
+		if (G_007EDC_DC_HOT_PLUG_DETECT1_INTERRUPT(rdev->irq.stat_regs.r500.disp_int)) {
 			tmp = RREG32(R_007D08_DC_HOT_PLUG_DETECT1_INT_CONTROL);
 			tmp |= S_007D08_DC_HOT_PLUG_DETECT1_INT_ACK(1);
 			WREG32(R_007D08_DC_HOT_PLUG_DETECT1_INT_CONTROL, tmp);
 		}
-		if (G_007EDC_DC_HOT_PLUG_DETECT2_INTERRUPT(*r500_disp_int)) {
+		if (G_007EDC_DC_HOT_PLUG_DETECT2_INTERRUPT(rdev->irq.stat_regs.r500.disp_int)) {
 			tmp = RREG32(R_007D18_DC_HOT_PLUG_DETECT2_INT_CONTROL);
 			tmp |= S_007D18_DC_HOT_PLUG_DETECT2_INT_ACK(1);
 			WREG32(R_007D18_DC_HOT_PLUG_DETECT2_INT_CONTROL, tmp);
 		}
 	} else {
-		*r500_disp_int = 0;
+		rdev->irq.stat_regs.r500.disp_int = 0;
 	}
 
 	if (irqs) {
@@ -578,32 +619,30 @@ static inline uint32_t rs600_irq_ack(struct radeon_device *rdev, u32 *r500_disp_
 
 void rs600_irq_disable(struct radeon_device *rdev)
 {
-	u32 tmp;
-
 	WREG32(R_000040_GEN_INT_CNTL, 0);
 	WREG32(R_006540_DxMODE_INT_MASK, 0);
 	/* Wait and acknowledge irq */
 	mdelay(1);
-	rs600_irq_ack(rdev, &tmp);
+	rs600_irq_ack(rdev);
 }
 
 int rs600_irq_process(struct radeon_device *rdev)
 {
-	uint32_t status, msi_rearm;
-	uint32_t r500_disp_int;
+	u32 status, msi_rearm;
 	bool queue_hotplug = false;
 
 	/* reset gui idle ack. the status bit is broken */
 	rdev->irq.gui_idle_acked = false;
 
-	status = rs600_irq_ack(rdev, &r500_disp_int);
-	if (!status && !r500_disp_int) {
+	status = rs600_irq_ack(rdev);
+	if (!status && !rdev->irq.stat_regs.r500.disp_int) {
 		return IRQ_NONE;
 	}
-	while (status || r500_disp_int) {
+	while (status || rdev->irq.stat_regs.r500.disp_int) {
 		/* SW interrupt */
-		if (G_000044_SW_INT(status))
+		if (G_000044_SW_INT(status)) {
 			radeon_fence_process(rdev);
+		}
 		/* GUI idle */
 		if (G_000040_GUI_IDLE(status)) {
 			rdev->irq.gui_idle_acked = true;
@@ -611,30 +650,38 @@ int rs600_irq_process(struct radeon_device *rdev)
 			wake_up(&rdev->irq.idle_queue);
 		}
 		/* Vertical blank interrupts */
-		if (G_007EDC_LB_D1_VBLANK_INTERRUPT(r500_disp_int)) {
-			drm_handle_vblank(rdev->ddev, 0);
-			rdev->pm.vblank_sync = true;
-			wake_up(&rdev->irq.vblank_queue);
+		if (G_007EDC_LB_D1_VBLANK_INTERRUPT(rdev->irq.stat_regs.r500.disp_int)) {
+			if (rdev->irq.crtc_vblank_int[0]) {
+				drm_handle_vblank(rdev->ddev, 0);
+				rdev->pm.vblank_sync = true;
+				wake_up(&rdev->irq.vblank_queue);
+			}
+			if (rdev->irq.pflip[0])
+				radeon_crtc_handle_flip(rdev, 0);
 		}
-		if (G_007EDC_LB_D2_VBLANK_INTERRUPT(r500_disp_int)) {
-			drm_handle_vblank(rdev->ddev, 1);
-			rdev->pm.vblank_sync = true;
-			wake_up(&rdev->irq.vblank_queue);
+		if (G_007EDC_LB_D2_VBLANK_INTERRUPT(rdev->irq.stat_regs.r500.disp_int)) {
+			if (rdev->irq.crtc_vblank_int[1]) {
+				drm_handle_vblank(rdev->ddev, 1);
+				rdev->pm.vblank_sync = true;
+				wake_up(&rdev->irq.vblank_queue);
+			}
+			if (rdev->irq.pflip[1])
+				radeon_crtc_handle_flip(rdev, 1);
 		}
-		if (G_007EDC_DC_HOT_PLUG_DETECT1_INTERRUPT(r500_disp_int)) {
+		if (G_007EDC_DC_HOT_PLUG_DETECT1_INTERRUPT(rdev->irq.stat_regs.r500.disp_int)) {
 			queue_hotplug = true;
 			DRM_DEBUG("HPD1\n");
 		}
-		if (G_007EDC_DC_HOT_PLUG_DETECT2_INTERRUPT(r500_disp_int)) {
+		if (G_007EDC_DC_HOT_PLUG_DETECT2_INTERRUPT(rdev->irq.stat_regs.r500.disp_int)) {
 			queue_hotplug = true;
 			DRM_DEBUG("HPD2\n");
 		}
-		status = rs600_irq_ack(rdev, &r500_disp_int);
+		status = rs600_irq_ack(rdev);
 	}
 	/* reset gui idle ack. the status bit is broken */
 	rdev->irq.gui_idle_acked = false;
 	if (queue_hotplug)
-		queue_work(rdev->wq, &rdev->hotplug_work);
+		schedule_work(&rdev->hotplug_work);
 	if (rdev->msi_enabled) {
 		switch (rdev->family) {
 		case CHIP_RS600:
@@ -693,7 +740,6 @@ void rs600_mc_init(struct radeon_device *rdev)
 	rdev->mc.real_vram_size = RREG32(RADEON_CONFIG_MEMSIZE);
 	rdev->mc.mc_vram_size = rdev->mc.real_vram_size;
 	rdev->mc.visible_vram_size = rdev->mc.aper_size;
-	rdev->mc.active_vram_size = rdev->mc.visible_vram_size;
 	rdev->mc.igp_sideport_enabled = radeon_atombios_sideport_present(rdev);
 	base = RREG32_MC(R_000004_MC_FB_LOCATION);
 	base = G_000004_MC_FB_START(base) << 16;
@@ -796,21 +842,24 @@ static int rs600_startup(struct radeon_device *rdev)
 	r = rs600_gart_enable(rdev);
 	if (r)
 		return r;
+
+	/* allocate wb buffer */
+	r = radeon_wb_init(rdev);
+	if (r)
+		return r;
+
 	/* Enable IRQ */
 	rs600_irq_set(rdev);
 	rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL);
 	/* 1M ring buffer */
 	r = r100_cp_init(rdev, 1024 * 1024);
 	if (r) {
-		dev_err(rdev->dev, "failled initializing CP (%d).\n", r);
+		dev_err(rdev->dev, "failed initializing CP (%d).\n", r);
 		return r;
 	}
-	r = r100_wb_init(rdev);
-	if (r)
-		dev_err(rdev->dev, "failled initializing WB (%d).\n", r);
 	r = r100_ib_init(rdev);
 	if (r) {
-		dev_err(rdev->dev, "failled initializing IB (%d).\n", r);
+		dev_err(rdev->dev, "failed initializing IB (%d).\n", r);
 		return r;
 	}
 
@@ -848,7 +897,7 @@ int rs600_suspend(struct radeon_device *rdev)
 {
 	r600_audio_fini(rdev);
 	r100_cp_disable(rdev);
-	r100_wb_disable(rdev);
+	radeon_wb_disable(rdev);
 	rs600_irq_disable(rdev);
 	rs600_gart_disable(rdev);
 	return 0;
@@ -858,7 +907,7 @@ void rs600_fini(struct radeon_device *rdev)
 {
 	r600_audio_fini(rdev);
 	r100_cp_fini(rdev);
-	r100_wb_fini(rdev);
+	radeon_wb_fini(rdev);
 	r100_ib_fini(rdev);
 	radeon_gem_fini(rdev);
 	rs600_gart_fini(rdev);
@@ -932,7 +981,7 @@ int rs600_init(struct radeon_device *rdev)
 		/* Somethings want wront with the accel init stop accel */
 		dev_err(rdev->dev, "Disabling GPU acceleration\n");
 		r100_cp_fini(rdev);
-		r100_wb_fini(rdev);
+		radeon_wb_fini(rdev);
 		r100_ib_fini(rdev);
 		rs600_gart_fini(rdev);
 		radeon_irq_kms_fini(rdev);