author    Jerome Glisse <jglisse@redhat.com>    2009-09-07 20:10:24 -0400
committer Dave Airlie <airlied@redhat.com>      2009-09-07 21:15:52 -0400
commit    3ce0a23d2d253185df24e22e3d5f89800bb3dd1c (patch)
tree      4b4defdbe33aec7317101cce0f89c33083f8d17b /drivers/gpu/drm/radeon/radeon_device.c
parent    4ce001abafafe77e5dd943d1480fc9f87894e96f (diff)
drm/radeon/kms: add r600 KMS support
This adds the r600 KMS + CS support to the Linux kernel.
The r600 TTM support is quite basic and still needs more
work, especially around using interrupts, but the polled
fencing should work okay for now.
Also, TTM currently uses memcpy to do VRAM moves; the code
to do this with a 3D blit is here, but it isn't fully
debugged yet.
Authors:
Alex Deucher <alexdeucher@gmail.com>
Dave Airlie <airlied@redhat.com>
Jerome Glisse <jglisse@redhat.com>
Signed-off-by: Jerome Glisse <jglisse@redhat.com>
Signed-off-by: Dave Airlie <airlied@redhat.com>
Diffstat (limited to 'drivers/gpu/drm/radeon/radeon_device.c')
-rw-r--r--  drivers/gpu/drm/radeon/radeon_device.c | 340
1 file changed, 191 insertions(+), 149 deletions(-)
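The structural change in this file is a split between the legacy init path and a new per-ASIC one: radeon_device_init() now calls radeon_init() first and only falls back to the old errata/BIOS/clocks/MC/GART/CP sequence when the ASIC hook has not claimed the new path. A minimal sketch of that dispatch follows; the stand-in types and hook wiring are assumptions (the real ones live outside this file, e.g. in radeon_asic.h and r600.c), only new_init_path and radeon_init() are taken from the diff below.

/* Minimal sketch, not kernel code: how the new_init_path flag gates init. */
#include <stdbool.h>

struct radeon_device;

/* stand-in for the per-ASIC function table (assumption) */
struct radeon_asic {
	int (*init)(struct radeon_device *rdev);
};

struct radeon_device {
	const struct radeon_asic *asic;
	bool new_init_path;	/* assumed to be set by R600-and-newer init hooks */
};

/* dispatch to the hook chosen in radeon_asic_init() */
static int radeon_init(struct radeon_device *rdev)
{
	return rdev->asic->init(rdev);
}

static int device_init_sketch(struct radeon_device *rdev)
{
	int r;

	rdev->new_init_path = false;
	r = radeon_init(rdev);		/* new chips do their full init here */
	if (r)
		return r;
	if (!rdev->new_init_path) {
		/* legacy path: errata, BIOS post, clocks, MC, fences,
		 * GART, ring buffer, IB pool -- as in the hunks below */
	}
	return 0;	/* modeset init then runs for both paths */
}

The same flag gates the fini, suspend, and resume paths further down, so pre-R600 parts keep the old teardown while the new chips defer to radeon_fini(), radeon_suspend(), and radeon_resume().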
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index 7693f7c67bd3..f2469c511789 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -37,7 +37,7 @@
 /*
  * Clear GPU surface registers.
  */
-static void radeon_surface_init(struct radeon_device *rdev)
+void radeon_surface_init(struct radeon_device *rdev)
 {
 	/* FIXME: check this out */
 	if (rdev->family < CHIP_R600) {
@@ -56,7 +56,7 @@ static void radeon_surface_init(struct radeon_device *rdev)
 /*
  * GPU scratch registers helpers function.
  */
-static void radeon_scratch_init(struct radeon_device *rdev)
+void radeon_scratch_init(struct radeon_device *rdev)
 {
 	int i;
 
@@ -156,16 +156,14 @@ int radeon_mc_setup(struct radeon_device *rdev)
 		tmp = (tmp + rdev->mc.gtt_size - 1) & ~(rdev->mc.gtt_size - 1);
 		rdev->mc.gtt_location = tmp;
 	}
-	DRM_INFO("radeon: VRAM %uM\n", rdev->mc.real_vram_size >> 20);
+	DRM_INFO("radeon: VRAM %uM\n", (unsigned)(rdev->mc.mc_vram_size >> 20));
 	DRM_INFO("radeon: VRAM from 0x%08X to 0x%08X\n",
-		 rdev->mc.vram_location,
-		 rdev->mc.vram_location + rdev->mc.mc_vram_size - 1);
-	if (rdev->mc.real_vram_size != rdev->mc.mc_vram_size)
-		DRM_INFO("radeon: VRAM less than aperture workaround enabled\n");
-	DRM_INFO("radeon: GTT %uM\n", rdev->mc.gtt_size >> 20);
+		 (unsigned)rdev->mc.vram_location,
+		 (unsigned)(rdev->mc.vram_location + rdev->mc.mc_vram_size - 1));
+	DRM_INFO("radeon: GTT %uM\n", (unsigned)(rdev->mc.gtt_size >> 20));
 	DRM_INFO("radeon: GTT from 0x%08X to 0x%08X\n",
-		 rdev->mc.gtt_location,
-		 rdev->mc.gtt_location + rdev->mc.gtt_size - 1);
+		 (unsigned)rdev->mc.gtt_location,
+		 (unsigned)(rdev->mc.gtt_location + rdev->mc.gtt_size - 1));
 	return 0;
 }
 
@@ -205,6 +203,31 @@ static bool radeon_card_posted(struct radeon_device *rdev)
 
 }
 
+int radeon_dummy_page_init(struct radeon_device *rdev)
+{
+	rdev->dummy_page.page = alloc_page(GFP_DMA32 | GFP_KERNEL | __GFP_ZERO);
+	if (rdev->dummy_page.page == NULL)
+		return -ENOMEM;
+	rdev->dummy_page.addr = pci_map_page(rdev->pdev, rdev->dummy_page.page,
+					0, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
+	if (!rdev->dummy_page.addr) {
+		__free_page(rdev->dummy_page.page);
+		rdev->dummy_page.page = NULL;
+		return -ENOMEM;
+	}
+	return 0;
+}
+
+void radeon_dummy_page_fini(struct radeon_device *rdev)
+{
+	if (rdev->dummy_page.page == NULL)
+		return;
+	pci_unmap_page(rdev->pdev, rdev->dummy_page.addr,
+			PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
+	__free_page(rdev->dummy_page.page);
+	rdev->dummy_page.page = NULL;
+}
+
 
 /*
  * Registers accessors functions.
@@ -323,9 +346,15 @@ int radeon_asic_init(struct radeon_device *rdev)
 	case CHIP_RV635:
 	case CHIP_RV670:
 	case CHIP_RS780:
+	case CHIP_RS880:
+		rdev->asic = &r600_asic;
+		break;
 	case CHIP_RV770:
 	case CHIP_RV730:
 	case CHIP_RV710:
+	case CHIP_RV740:
+		rdev->asic = &rv770_asic;
+		break;
 	default:
 		/* FIXME: not supported yet */
 		return -EINVAL;
@@ -448,7 +477,7 @@ int radeon_device_init(struct radeon_device *rdev,
 		       struct pci_dev *pdev,
 		       uint32_t flags)
 {
-	int r, ret;
+	int r, ret = 0;
 	int dma_bits;
 
 	DRM_INFO("radeon: Initializing kernel modesetting.\n");
@@ -487,10 +516,6 @@ int radeon_device_init(struct radeon_device *rdev,
 	if (r) {
 		return r;
 	}
-	r = radeon_init(rdev);
-	if (r) {
-		return r;
-	}
 
 	/* set DMA mask + need_dma32 flags.
 	 * PCIE - can handle 40-bits.
@@ -521,111 +546,118 @@ int radeon_device_init(struct radeon_device *rdev,
 	DRM_INFO("register mmio base: 0x%08X\n", (uint32_t)rdev->rmmio_base);
 	DRM_INFO("register mmio size: %u\n", (unsigned)rdev->rmmio_size);
 
-	/* Setup errata flags */
-	radeon_errata(rdev);
-	/* Initialize scratch registers */
-	radeon_scratch_init(rdev);
-	/* Initialize surface registers */
-	radeon_surface_init(rdev);
-
-	/* TODO: disable VGA need to use VGA request */
-	/* BIOS*/
-	if (!radeon_get_bios(rdev)) {
-		if (ASIC_IS_AVIVO(rdev))
-			return -EINVAL;
-	}
-	if (rdev->is_atom_bios) {
-		r = radeon_atombios_init(rdev);
+	rdev->new_init_path = false;
+	r = radeon_init(rdev);
+	if (r) {
+		return r;
+	}
+	if (!rdev->new_init_path) {
+		/* Setup errata flags */
+		radeon_errata(rdev);
+		/* Initialize scratch registers */
+		radeon_scratch_init(rdev);
+		/* Initialize surface registers */
+		radeon_surface_init(rdev);
+
+		/* TODO: disable VGA need to use VGA request */
+		/* BIOS*/
+		if (!radeon_get_bios(rdev)) {
+			if (ASIC_IS_AVIVO(rdev))
+				return -EINVAL;
+		}
+		if (rdev->is_atom_bios) {
+			r = radeon_atombios_init(rdev);
+			if (r) {
+				return r;
+			}
+		} else {
+			r = radeon_combios_init(rdev);
+			if (r) {
+				return r;
+			}
+		}
+		/* Reset gpu before posting otherwise ATOM will enter infinite loop */
+		if (radeon_gpu_reset(rdev)) {
+			/* FIXME: what do we want to do here ? */
+		}
+		/* check if cards are posted or not */
+		if (!radeon_card_posted(rdev) && rdev->bios) {
+			DRM_INFO("GPU not posted. posting now...\n");
+			if (rdev->is_atom_bios) {
+				atom_asic_init(rdev->mode_info.atom_context);
+			} else {
+				radeon_combios_asic_init(rdev->ddev);
+			}
+		}
+		/* Initialize clocks */
+		r = radeon_clocks_init(rdev);
 		if (r) {
 			return r;
 		}
-	} else {
-		r = radeon_combios_init(rdev);
+		/* Get vram informations */
+		radeon_vram_info(rdev);
+
+		/* Add an MTRR for the VRAM */
+		rdev->mc.vram_mtrr = mtrr_add(rdev->mc.aper_base, rdev->mc.aper_size,
+				MTRR_TYPE_WRCOMB, 1);
+		DRM_INFO("Detected VRAM RAM=%uM, BAR=%uM\n",
+			 (unsigned)(rdev->mc.mc_vram_size >> 20),
+			 (unsigned)(rdev->mc.aper_size >> 20));
+		DRM_INFO("RAM width %dbits %cDR\n",
+			 rdev->mc.vram_width, rdev->mc.vram_is_ddr ? 'D' : 'S');
+		/* Initialize memory controller (also test AGP) */
+		r = radeon_mc_init(rdev);
 		if (r) {
 			return r;
 		}
-	}
-	/* Reset gpu before posting otherwise ATOM will enter infinite loop */
-	if (radeon_gpu_reset(rdev)) {
-		/* FIXME: what do we want to do here ? */
-	}
-	/* check if cards are posted or not */
-	if (!radeon_card_posted(rdev) && rdev->bios) {
-		DRM_INFO("GPU not posted. posting now...\n");
-		if (rdev->is_atom_bios) {
-			atom_asic_init(rdev->mode_info.atom_context);
-		} else {
-			radeon_combios_asic_init(rdev->ddev);
-		}
-	}
-	/* Initialize clocks */
-	r = radeon_clocks_init(rdev);
-	if (r) {
-		return r;
-	}
-	/* Get vram informations */
-	radeon_vram_info(rdev);
-
-	/* Add an MTRR for the VRAM */
-	rdev->mc.vram_mtrr = mtrr_add(rdev->mc.aper_base, rdev->mc.aper_size,
-			MTRR_TYPE_WRCOMB, 1);
-	DRM_INFO("Detected VRAM RAM=%uM, BAR=%uM\n",
-		 rdev->mc.real_vram_size >> 20,
-		 (unsigned)rdev->mc.aper_size >> 20);
-	DRM_INFO("RAM width %dbits %cDR\n",
-		 rdev->mc.vram_width, rdev->mc.vram_is_ddr ? 'D' : 'S');
-	/* Initialize memory controller (also test AGP) */
-	r = radeon_mc_init(rdev);
-	if (r) {
-		return r;
-	}
-	/* Fence driver */
-	r = radeon_fence_driver_init(rdev);
-	if (r) {
-		return r;
-	}
-	r = radeon_irq_kms_init(rdev);
-	if (r) {
-		return r;
-	}
-	/* Memory manager */
-	r = radeon_object_init(rdev);
-	if (r) {
-		return r;
-	}
-	/* Initialize GART (initialize after TTM so we can allocate
-	 * memory through TTM but finalize after TTM) */
-	r = radeon_gart_enable(rdev);
-	if (!r) {
-		r = radeon_gem_init(rdev);
-	}
-
-	/* 1M ring buffer */
-	if (!r) {
-		r = radeon_cp_init(rdev, 1024 * 1024);
-	}
-	if (!r) {
-		r = radeon_wb_init(rdev);
+		/* Fence driver */
+		r = radeon_fence_driver_init(rdev);
 		if (r) {
-			DRM_ERROR("radeon: failled initializing WB (%d).\n", r);
 			return r;
 		}
-	}
-	if (!r) {
-		r = radeon_ib_pool_init(rdev);
+		r = radeon_irq_kms_init(rdev);
 		if (r) {
-			DRM_ERROR("radeon: failled initializing IB pool (%d).\n", r);
 			return r;
 		}
-	}
-	if (!r) {
-		r = radeon_ib_test(rdev);
+		/* Memory manager */
+		r = radeon_object_init(rdev);
 		if (r) {
-			DRM_ERROR("radeon: failled testing IB (%d).\n", r);
 			return r;
 		}
+		/* Initialize GART (initialize after TTM so we can allocate
+		 * memory through TTM but finalize after TTM) */
+		r = radeon_gart_enable(rdev);
+		if (!r) {
+			r = radeon_gem_init(rdev);
+		}
+
+		/* 1M ring buffer */
+		if (!r) {
+			r = radeon_cp_init(rdev, 1024 * 1024);
+		}
+		if (!r) {
+			r = radeon_wb_init(rdev);
+			if (r) {
+				DRM_ERROR("radeon: failled initializing WB (%d).\n", r);
+				return r;
+			}
+		}
+		if (!r) {
+			r = radeon_ib_pool_init(rdev);
+			if (r) {
+				DRM_ERROR("radeon: failled initializing IB pool (%d).\n", r);
+				return r;
+			}
+		}
+		if (!r) {
+			r = radeon_ib_test(rdev);
+			if (r) {
+				DRM_ERROR("radeon: failled testing IB (%d).\n", r);
+				return r;
+			}
+		}
+		ret = r;
 	}
-	ret = r;
 	r = radeon_modeset_init(rdev);
 	if (r) {
 		return r;
@@ -651,26 +683,29 @@ void radeon_device_fini(struct radeon_device *rdev)
 	rdev->shutdown = true;
 	/* Order matter so becarefull if you rearrange anythings */
 	radeon_modeset_fini(rdev);
-	radeon_ib_pool_fini(rdev);
-	radeon_cp_fini(rdev);
-	radeon_wb_fini(rdev);
-	radeon_gem_fini(rdev);
-	radeon_object_fini(rdev);
-	/* mc_fini must be after object_fini */
-	radeon_mc_fini(rdev);
+	if (!rdev->new_init_path) {
+		radeon_ib_pool_fini(rdev);
+		radeon_cp_fini(rdev);
+		radeon_wb_fini(rdev);
+		radeon_gem_fini(rdev);
+		radeon_mc_fini(rdev);
 #if __OS_HAS_AGP
 	radeon_agp_fini(rdev);
 #endif
 	radeon_irq_kms_fini(rdev);
 	radeon_fence_driver_fini(rdev);
 	radeon_clocks_fini(rdev);
-	if (rdev->is_atom_bios) {
-		radeon_atombios_fini(rdev);
+		radeon_object_fini(rdev);
+		if (rdev->is_atom_bios) {
+			radeon_atombios_fini(rdev);
+		} else {
+			radeon_combios_fini(rdev);
+		}
+		kfree(rdev->bios);
+		rdev->bios = NULL;
 	} else {
-		radeon_combios_fini(rdev);
+		radeon_fini(rdev);
 	}
-	kfree(rdev->bios);
-	rdev->bios = NULL;
 	iounmap(rdev->rmmio);
 	rdev->rmmio = NULL;
 }
@@ -708,9 +743,12 @@ int radeon_suspend_kms(struct drm_device *dev, pm_message_t state)
 	/* wait for gpu to finish processing current batch */
 	radeon_fence_wait_last(rdev);
 
-	radeon_cp_disable(rdev);
-	radeon_gart_disable(rdev);
-
+	if (!rdev->new_init_path) {
+		radeon_cp_disable(rdev);
+		radeon_gart_disable(rdev);
+	} else {
+		radeon_suspend(rdev);
+	}
 	/* evict remaining vram memory */
 	radeon_object_evict_vram(rdev);
 
@@ -746,33 +784,37 @@ int radeon_resume_kms(struct drm_device *dev)
 	if (radeon_gpu_reset(rdev)) {
 		/* FIXME: what do we want to do here ? */
 	}
-	/* post card */
-	if (rdev->is_atom_bios) {
-		atom_asic_init(rdev->mode_info.atom_context);
+	if (!rdev->new_init_path) {
+		/* post card */
+		if (rdev->is_atom_bios) {
+			atom_asic_init(rdev->mode_info.atom_context);
+		} else {
+			radeon_combios_asic_init(rdev->ddev);
+		}
+		/* Initialize clocks */
+		r = radeon_clocks_init(rdev);
+		if (r) {
+			release_console_sem();
+			return r;
+		}
+		/* Enable IRQ */
+		rdev->irq.sw_int = true;
+		radeon_irq_set(rdev);
+		/* Initialize GPU Memory Controller */
+		r = radeon_mc_init(rdev);
+		if (r) {
+			goto out;
+		}
+		r = radeon_gart_enable(rdev);
+		if (r) {
+			goto out;
+		}
+		r = radeon_cp_init(rdev, rdev->cp.ring_size);
+		if (r) {
+			goto out;
+		}
 	} else {
-		radeon_combios_asic_init(rdev->ddev);
-	}
-	/* Initialize clocks */
-	r = radeon_clocks_init(rdev);
-	if (r) {
-		release_console_sem();
-		return r;
-	}
-	/* Enable IRQ */
-	rdev->irq.sw_int = true;
-	radeon_irq_set(rdev);
-	/* Initialize GPU Memory Controller */
-	r = radeon_mc_init(rdev);
-	if (r) {
-		goto out;
-	}
-	r = radeon_gart_enable(rdev);
-	if (r) {
-		goto out;
-	}
-	r = radeon_cp_init(rdev, rdev->cp.ring_size);
-	if (r) {
-		goto out;
-	}
+		radeon_resume(rdev);
 	}
 out:
 	fb_set_suspend(rdev->fbdev_info, 0);