author    Dave Airlie <airlied@redhat.com>  2013-01-07 23:52:52 -0500
committer Dave Airlie <airlied@redhat.com>  2013-01-07 23:52:52 -0500
commit    c75be2592c350035ce871624ebf549b4609aeeab
tree      da25117dac120ea28c36a33a4ca6fd3eb3a4ebbf /drivers/gpu
parent    be8a42ae60addd8b6092535c11b42d099d6470ec
parent    d1f9809ed1315c4cdc5760cf2f59626fd3276952
Merge branch 'drm-fixes-3.8' of git://people.freedesktop.org/~agd5f/linux into drm-next
Alex writes:
A few more fixes for DMA and a mac quirk.

* 'drm-fixes-3.8' of git://people.freedesktop.org/~agd5f/linux:
  drm/radeon: add quirk for d3 delay during switcheroo poweron for apple macbooks
  drm/radeon: fix DMA CS parser for r6xx linear copy packet
  drm/radeon: split r6xx and r7xx copy_dma functions
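The mechanical difference behind the copy_dma split is the DMA_PACKET_COPY
layout: r7xx carries the upper 8 bits of each address in its own ring dword
(5 dwords per packet), while r6xx packs both upper bytes into a single dword,
dst in bits 16..23 and src in bits 0..7 (4 dwords per packet). A minimal
sketch of the two encodings follows; the emit_copy_* helper names and the
bare uint32_t buffer standing in for the real ring are invented for
illustration, and the header dword is simplified to just the count:

#include <stdint.h>

static inline uint32_t upper_32(uint64_t v) { return (uint32_t)(v >> 32); }

/* r7xx linear copy: 5 dwords, one upper-address byte per dword */
static void emit_copy_r7xx(uint32_t *pkt, uint64_t src, uint64_t dst, uint32_t ndw)
{
	pkt[0] = ndw & 0xFFFF;                /* simplified DMA_PACKET_COPY header */
	pkt[1] = (uint32_t)dst & 0xfffffffc;  /* dst bits 2..31 */
	pkt[2] = (uint32_t)src & 0xfffffffc;  /* src bits 2..31 */
	pkt[3] = upper_32(dst) & 0xff;        /* dst bits 32..39 */
	pkt[4] = upper_32(src) & 0xff;        /* src bits 32..39 */
}

/* r6xx linear copy: 4 dwords, both upper bytes share the last dword */
static void emit_copy_r6xx(uint32_t *pkt, uint64_t src, uint64_t dst, uint32_t ndw)
{
	pkt[0] = ndw & 0xFFFF;
	pkt[1] = (uint32_t)dst & 0xfffffffc;
	pkt[2] = (uint32_t)src & 0xfffffffc;
	pkt[3] = ((upper_32(dst) & 0xff) << 16) | (upper_32(src) & 0xff);
}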
Diffstat (limited to 'drivers/gpu')
-rw-r--r--  drivers/gpu/drm/radeon/r600.c           10
-rw-r--r--  drivers/gpu/drm/radeon/r600_cs.c        31
-rw-r--r--  drivers/gpu/drm/radeon/radeon_asic.c     4
-rw-r--r--  drivers/gpu/drm/radeon/radeon_asic.h     4
-rw-r--r--  drivers/gpu/drm/radeon/radeon_device.c  28
-rw-r--r--  drivers/gpu/drm/radeon/rv770.c          74
6 files changed, 135 insertions, 16 deletions
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index 923f93647042..537e259b3837 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -2646,7 +2646,7 @@ int r600_copy_blit(struct radeon_device *rdev,
  * @num_gpu_pages: number of GPU pages to xfer
  * @fence: radeon fence object
  *
- * Copy GPU paging using the DMA engine (r6xx-r7xx).
+ * Copy GPU paging using the DMA engine (r6xx).
  * Used by the radeon ttm implementation to move pages if
  * registered as the asic copy callback.
  */
@@ -2669,8 +2669,8 @@ int r600_copy_dma(struct radeon_device *rdev,
 	}
 
 	size_in_dw = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT) / 4;
-	num_loops = DIV_ROUND_UP(size_in_dw, 0xffff);
-	r = radeon_ring_lock(rdev, ring, num_loops * 5 + 8);
+	num_loops = DIV_ROUND_UP(size_in_dw, 0xFFFE);
+	r = radeon_ring_lock(rdev, ring, num_loops * 4 + 8);
 	if (r) {
 		DRM_ERROR("radeon: moving bo (%d).\n", r);
 		radeon_semaphore_free(rdev, &sem, NULL);
@@ -2693,8 +2693,8 @@ int r600_copy_dma(struct radeon_device *rdev,
 		radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_COPY, 0, 0, cur_size_in_dw));
 		radeon_ring_write(ring, dst_offset & 0xfffffffc);
 		radeon_ring_write(ring, src_offset & 0xfffffffc);
-		radeon_ring_write(ring, upper_32_bits(dst_offset) & 0xff);
-		radeon_ring_write(ring, upper_32_bits(src_offset) & 0xff);
+		radeon_ring_write(ring, (((upper_32_bits(dst_offset) & 0xff) << 16) |
+					 (upper_32_bits(src_offset) & 0xff)));
 		src_offset += cur_size_in_dw * 4;
 		dst_offset += cur_size_in_dw * 4;
 	}
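A note on the new ring reservation in the hunk above: the r6xx packet is now
4 dwords instead of 5, and the per-packet dword count is capped at 0xFFFE
rather than 0xFFFF, so both factors passed to radeon_ring_lock() change. A
quick standalone check of the arithmetic (the 1 MiB copy size is arbitrary,
picked only for illustration):

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	unsigned size_in_dw = (1u << 20) / 4;	/* 1 MiB copy = 262144 dwords */
	unsigned loops = DIV_ROUND_UP(size_in_dw, 0xFFFE);

	printf("%u loops, %u ring dwords\n", loops, loops * 4 + 8);
	/* prints "5 loops, 28 ring dwords"; the old 5-dword packet
	 * would have reserved 5 * 5 + 8 = 33 for the same copy */
	return 0;
}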
diff --git a/drivers/gpu/drm/radeon/r600_cs.c b/drivers/gpu/drm/radeon/r600_cs.c
index 9ea13d07cc55..03191a56eb44 100644
--- a/drivers/gpu/drm/radeon/r600_cs.c
+++ b/drivers/gpu/drm/radeon/r600_cs.c
@@ -2677,16 +2677,29 @@ int r600_dma_cs_parse(struct radeon_cs_parser *p)
 				}
 				p->idx += 7;
 			} else {
-				src_offset = ib[idx+2];
-				src_offset |= ((u64)(ib[idx+4] & 0xff)) << 32;
-				dst_offset = ib[idx+1];
-				dst_offset |= ((u64)(ib[idx+3] & 0xff)) << 32;
+				if (p->family >= CHIP_RV770) {
+					src_offset = ib[idx+2];
+					src_offset |= ((u64)(ib[idx+4] & 0xff)) << 32;
+					dst_offset = ib[idx+1];
+					dst_offset |= ((u64)(ib[idx+3] & 0xff)) << 32;
 
-				ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
-				ib[idx+2] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);
-				ib[idx+3] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;
-				ib[idx+4] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
-				p->idx += 5;
+					ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
+					ib[idx+2] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);
+					ib[idx+3] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;
+					ib[idx+4] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
+					p->idx += 5;
+				} else {
+					src_offset = ib[idx+2];
+					src_offset |= ((u64)(ib[idx+3] & 0xff)) << 32;
+					dst_offset = ib[idx+1];
+					dst_offset |= ((u64)(ib[idx+3] & 0xff0000)) << 16;
+
+					ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
+					ib[idx+2] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);
+					ib[idx+3] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
+					ib[idx+3] += (upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff) << 16;
+					p->idx += 4;
+				}
 			}
 			if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) {
 				dev_warn(p->dev, "DMA copy src buffer too small (%llu %lu)\n",
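The dst_offset extraction in the new r6xx branch looks odd at first glance:
the upper destination byte sits in bits 16..23 of ib[idx+3], and shifting the
still-masked value left by a further 16 lands it at bits 32..39, exactly
where it belongs in the 64-bit address. A standalone check with made-up
values:

#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint32_t ib3 = 0x00AB00CD;	/* dst hi byte 0xAB in bits 16..23,
					 * src hi byte 0xCD in bits 0..7 */
	uint64_t dst = 0x12345678;	/* low 32 bits, as read from ib[idx+1] */

	dst |= ((uint64_t)(ib3 & 0xff0000)) << 16;	/* 0xAB0000 << 16 == 0xAB << 32 */
	assert(dst == 0xAB12345678ULL);

	assert((ib3 & 0xff) == 0xCD);	/* src upper byte needs no extra shift */
	return 0;
}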
diff --git a/drivers/gpu/drm/radeon/radeon_asic.c b/drivers/gpu/drm/radeon/radeon_asic.c
index 596bcbe80ed0..9056fafb00ea 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.c
+++ b/drivers/gpu/drm/radeon/radeon_asic.c
@@ -1140,9 +1140,9 @@ static struct radeon_asic rv770_asic = {
 	.copy = {
 		.blit = &r600_copy_blit,
 		.blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
-		.dma = &r600_copy_dma,
+		.dma = &rv770_copy_dma,
 		.dma_ring_index = R600_RING_TYPE_DMA_INDEX,
-		.copy = &r600_copy_dma,
+		.copy = &rv770_copy_dma,
 		.copy_ring_index = R600_RING_TYPE_DMA_INDEX,
 	},
 	.surface = {
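With the rv770_asic table switched over, anything that copies through the
asic dispatch (for instance the ttm bo-move path) now reaches
rv770_copy_dma() without any caller changing. A caller-side sketch, using
only the dispatch shape visible in this hunk and the prototype added in the
radeon_asic.h hunk below; src_gpu_addr, dst_gpu_addr and num_gpu_pages are
placeholder variables, and real callers also handle locking and fence
cleanup, omitted here:

	/* sketch only: invoke the table's copy callback directly */
	struct radeon_fence *fence = NULL;
	int r = rdev->asic->copy.copy(rdev, src_gpu_addr, dst_gpu_addr,
				      num_gpu_pages, &fence);
	if (r)
		DRM_ERROR("copy failed (%d)\n", r);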
diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h
index 5f4882cc2152..15d70e613076 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.h
+++ b/drivers/gpu/drm/radeon/radeon_asic.h
@@ -403,6 +403,10 @@ u32 rv770_page_flip(struct radeon_device *rdev, int crtc, u64 crtc_base);
 void r700_vram_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc);
 void r700_cp_stop(struct radeon_device *rdev);
 void r700_cp_fini(struct radeon_device *rdev);
+int rv770_copy_dma(struct radeon_device *rdev,
+		   uint64_t src_offset, uint64_t dst_offset,
+		   unsigned num_gpu_pages,
+		   struct radeon_fence **fence);
 
 /*
  * evergreen
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index cd756262924d..edfc54e41842 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -897,6 +897,25 @@ static void radeon_check_arguments(struct radeon_device *rdev)
 }
 
 /**
+ * radeon_switcheroo_quirk_long_wakeup - return true if longer d3 delay is
+ * needed for waking up.
+ *
+ * @pdev: pci dev pointer
+ */
+static bool radeon_switcheroo_quirk_long_wakeup(struct pci_dev *pdev)
+{
+
+	/* 6600m in a macbook pro */
+	if (pdev->subsystem_vendor == PCI_VENDOR_ID_APPLE &&
+	    pdev->subsystem_device == 0x00e2) {
+		printk(KERN_INFO "radeon: quirking longer d3 wakeup delay\n");
+		return true;
+	}
+
+	return false;
+}
+
+/**
  * radeon_switcheroo_set_state - set switcheroo state
  *
  * @pdev: pci dev pointer
@@ -910,10 +929,19 @@ static void radeon_switcheroo_set_state(struct pci_dev *pdev, enum vga_switchero
 	struct drm_device *dev = pci_get_drvdata(pdev);
 	pm_message_t pmm = { .event = PM_EVENT_SUSPEND };
 	if (state == VGA_SWITCHEROO_ON) {
+		unsigned d3_delay = dev->pdev->d3_delay;
+
 		printk(KERN_INFO "radeon: switched on\n");
 		/* don't suspend or resume card normally */
 		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
+
+		if (d3_delay < 20 && radeon_switcheroo_quirk_long_wakeup(pdev))
+			dev->pdev->d3_delay = 20;
+
 		radeon_resume_kms(dev);
+
+		dev->pdev->d3_delay = d3_delay;
+
 		dev->switch_power_state = DRM_SWITCH_POWER_ON;
 		drm_kms_helper_poll_enable(dev);
 	} else {
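The quirk itself stays a single open-coded subsystem-ID test. If more
machines ever need the longer wakeup, a pci_device_id table would keep the
list in one place; a hypothetical sketch of that shape, not part of this
patch (radeon_long_wakeup_ids and radeon_needs_long_wakeup are invented
names):

#include <linux/pci.h>

static const struct pci_device_id radeon_long_wakeup_ids[] = {
	/* 6600m in a macbook pro -- the one entry the patch actually quirks */
	{ .vendor = PCI_ANY_ID, .device = PCI_ANY_ID,
	  .subvendor = PCI_VENDOR_ID_APPLE, .subdevice = 0x00e2 },
	{ /* sentinel */ }
};

static bool radeon_needs_long_wakeup(struct pci_dev *pdev)
{
	return pci_match_id(radeon_long_wakeup_ids, pdev) != NULL;
}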
diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c
index 87c979c4f721..1b2444f4d8f4 100644
--- a/drivers/gpu/drm/radeon/rv770.c
+++ b/drivers/gpu/drm/radeon/rv770.c
@@ -887,6 +887,80 @@ static int rv770_mc_init(struct radeon_device *rdev)
 	return 0;
 }
 
+/**
+ * rv770_copy_dma - copy pages using the DMA engine
+ *
+ * @rdev: radeon_device pointer
+ * @src_offset: src GPU address
+ * @dst_offset: dst GPU address
+ * @num_gpu_pages: number of GPU pages to xfer
+ * @fence: radeon fence object
+ *
+ * Copy GPU paging using the DMA engine (r7xx).
+ * Used by the radeon ttm implementation to move pages if
+ * registered as the asic copy callback.
+ */
+int rv770_copy_dma(struct radeon_device *rdev,
+		   uint64_t src_offset, uint64_t dst_offset,
+		   unsigned num_gpu_pages,
+		   struct radeon_fence **fence)
+{
+	struct radeon_semaphore *sem = NULL;
+	int ring_index = rdev->asic->copy.dma_ring_index;
+	struct radeon_ring *ring = &rdev->ring[ring_index];
+	u32 size_in_dw, cur_size_in_dw;
+	int i, num_loops;
+	int r = 0;
+
+	r = radeon_semaphore_create(rdev, &sem);
+	if (r) {
+		DRM_ERROR("radeon: moving bo (%d).\n", r);
+		return r;
+	}
+
+	size_in_dw = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT) / 4;
+	num_loops = DIV_ROUND_UP(size_in_dw, 0xFFFF);
+	r = radeon_ring_lock(rdev, ring, num_loops * 5 + 8);
+	if (r) {
+		DRM_ERROR("radeon: moving bo (%d).\n", r);
+		radeon_semaphore_free(rdev, &sem, NULL);
+		return r;
+	}
+
+	if (radeon_fence_need_sync(*fence, ring->idx)) {
+		radeon_semaphore_sync_rings(rdev, sem, (*fence)->ring,
+					    ring->idx);
+		radeon_fence_note_sync(*fence, ring->idx);
+	} else {
+		radeon_semaphore_free(rdev, &sem, NULL);
+	}
+
+	for (i = 0; i < num_loops; i++) {
+		cur_size_in_dw = size_in_dw;
+		if (cur_size_in_dw > 0xFFFF)
+			cur_size_in_dw = 0xFFFF;
+		size_in_dw -= cur_size_in_dw;
+		radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_COPY, 0, 0, cur_size_in_dw));
+		radeon_ring_write(ring, dst_offset & 0xfffffffc);
+		radeon_ring_write(ring, src_offset & 0xfffffffc);
+		radeon_ring_write(ring, upper_32_bits(dst_offset) & 0xff);
+		radeon_ring_write(ring, upper_32_bits(src_offset) & 0xff);
+		src_offset += cur_size_in_dw * 4;
+		dst_offset += cur_size_in_dw * 4;
+	}
+
+	r = radeon_fence_emit(rdev, fence, ring->idx);
+	if (r) {
+		radeon_ring_unlock_undo(rdev, ring);
+		return r;
+	}
+
+	radeon_ring_unlock_commit(rdev, ring);
+	radeon_semaphore_free(rdev, &sem, *fence);
+
+	return r;
+}
+
 static int rv770_startup(struct radeon_device *rdev)
 {
 	struct radeon_ring *ring;