Diffstat (limited to 'drivers/gpu/drm')
-rw-r--r--   drivers/gpu/drm/radeon/radeon/evergreen.c
-rw-r--r--   drivers/gpu/drm/radeon/evergreen.c        | 14
-rw-r--r--   drivers/gpu/drm/radeon/ni.c               | 12
-rw-r--r--   drivers/gpu/drm/radeon/r100.c             | 22
-rw-r--r--   drivers/gpu/drm/radeon/r200.c             |  4
-rw-r--r--   drivers/gpu/drm/radeon/r600.c             | 14
-rw-r--r--   drivers/gpu/drm/radeon/radeon.h           |  7
-rw-r--r--   drivers/gpu/drm/radeon/radeon_asic.h      |  8
-rw-r--r--   drivers/gpu/drm/radeon/radeon_display.c   |  2
-rw-r--r--   drivers/gpu/drm/radeon/radeon_encoders.c  |  9
-rw-r--r--   drivers/gpu/drm/radeon/radeon_ttm.c       |  7
-rw-r--r--   drivers/gpu/drm/ttm/ttm_bo.c              |  3
11 files changed, 59 insertions, 43 deletions
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index dc0a5b56c81a..e8a746712b5b 100644
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -1404,7 +1404,8 @@ int evergreen_cp_resume(struct radeon_device *rdev)
 	/* Initialize the ring buffer's read and write pointers */
 	WREG32(CP_RB_CNTL, tmp | RB_RPTR_WR_ENA);
 	WREG32(CP_RB_RPTR_WR, 0);
-	WREG32(CP_RB_WPTR, 0);
+	rdev->cp.wptr = 0;
+	WREG32(CP_RB_WPTR, rdev->cp.wptr);
 
 	/* set the wb address wether it's enabled or not */
 	WREG32(CP_RB_RPTR_ADDR,
@@ -1426,7 +1427,6 @@ int evergreen_cp_resume(struct radeon_device *rdev)
 	WREG32(CP_DEBUG, (1 << 27) | (1 << 28));
 
 	rdev->cp.rptr = RREG32(CP_RB_RPTR);
-	rdev->cp.wptr = RREG32(CP_RB_WPTR);
 
 	evergreen_cp_start(rdev);
 	rdev->cp.ready = true;
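Not from the patch itself: a standalone sketch (a mocked variable stands in for the WPTR register) of the pattern the two hunks above adopt, where the driver decides the write-pointer value in software and programs the register from its cached copy instead of reading the register back after the reset.

/* Standalone sketch, not driver code: keep the cached wptr authoritative. */
#include <stdio.h>
#include <stdint.h>

static uint32_t wptr_reg = 0x1234;	/* mocked register holding a stale value */

int main(void)
{
	uint32_t cached_wptr;

	/* old flow: read the register back and trust whatever it reports */
	cached_wptr = wptr_reg;
	printf("read-back wptr: 0x%x (stale)\n", cached_wptr);

	/* new flow: zero the cached copy, then program the register from it */
	cached_wptr = 0;
	wptr_reg = cached_wptr;
	printf("programmed wptr: 0x%x (cache and register agree)\n", wptr_reg);
	return 0;
}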
@@ -3171,21 +3171,23 @@ int evergreen_suspend(struct radeon_device *rdev)
 }
 
 int evergreen_copy_blit(struct radeon_device *rdev,
-			uint64_t src_offset, uint64_t dst_offset,
-			unsigned num_pages, struct radeon_fence *fence)
+			uint64_t src_offset,
+			uint64_t dst_offset,
+			unsigned num_gpu_pages,
+			struct radeon_fence *fence)
 {
 	int r;
 
 	mutex_lock(&rdev->r600_blit.mutex);
 	rdev->r600_blit.vb_ib = NULL;
-	r = evergreen_blit_prepare_copy(rdev, num_pages * RADEON_GPU_PAGE_SIZE);
+	r = evergreen_blit_prepare_copy(rdev, num_gpu_pages * RADEON_GPU_PAGE_SIZE);
 	if (r) {
 		if (rdev->r600_blit.vb_ib)
 			radeon_ib_free(rdev, &rdev->r600_blit.vb_ib);
 		mutex_unlock(&rdev->r600_blit.mutex);
 		return r;
 	}
-	evergreen_kms_blit_copy(rdev, src_offset, dst_offset, num_pages * RADEON_GPU_PAGE_SIZE);
+	evergreen_kms_blit_copy(rdev, src_offset, dst_offset, num_gpu_pages * RADEON_GPU_PAGE_SIZE);
 	evergreen_blit_done_copy(rdev, fence);
 	mutex_unlock(&rdev->r600_blit.mutex);
 	return 0;
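With the parameter now counted in GPU pages, the byte size handed to the blit helpers is simply the page count times RADEON_GPU_PAGE_SIZE, independent of the CPU page size. A standalone sketch with example numbers:

/* Standalone sketch: GPU-page count to byte count, as in the hunk above. */
#include <stdio.h>

#define RADEON_GPU_PAGE_SIZE 4096

int main(void)
{
	unsigned num_gpu_pages = 256;	/* e.g. a 1 MiB buffer object */
	unsigned long bytes = (unsigned long)num_gpu_pages * RADEON_GPU_PAGE_SIZE;

	printf("%u GPU pages -> %lu bytes to blit\n", num_gpu_pages, bytes);
	return 0;
}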
diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c
index cbf57d75d925..99fbd793c08c 100644
--- a/drivers/gpu/drm/radeon/ni.c
+++ b/drivers/gpu/drm/radeon/ni.c
@@ -1187,7 +1187,8 @@ int cayman_cp_resume(struct radeon_device *rdev)
 
 	/* Initialize the ring buffer's read and write pointers */
 	WREG32(CP_RB0_CNTL, tmp | RB_RPTR_WR_ENA);
-	WREG32(CP_RB0_WPTR, 0);
+	rdev->cp.wptr = 0;
+	WREG32(CP_RB0_WPTR, rdev->cp.wptr);
 
 	/* set the wb address wether it's enabled or not */
 	WREG32(CP_RB0_RPTR_ADDR, (rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFFFFFFFC);
@@ -1207,7 +1208,6 @@ int cayman_cp_resume(struct radeon_device *rdev)
 	WREG32(CP_RB0_BASE, rdev->cp.gpu_addr >> 8);
 
 	rdev->cp.rptr = RREG32(CP_RB0_RPTR);
-	rdev->cp.wptr = RREG32(CP_RB0_WPTR);
 
 	/* ring1 - compute only */
 	/* Set ring buffer size */
@@ -1220,7 +1220,8 @@ int cayman_cp_resume(struct radeon_device *rdev)
 
 	/* Initialize the ring buffer's read and write pointers */
 	WREG32(CP_RB1_CNTL, tmp | RB_RPTR_WR_ENA);
-	WREG32(CP_RB1_WPTR, 0);
+	rdev->cp1.wptr = 0;
+	WREG32(CP_RB1_WPTR, rdev->cp1.wptr);
 
 	/* set the wb address wether it's enabled or not */
 	WREG32(CP_RB1_RPTR_ADDR, (rdev->wb.gpu_addr + RADEON_WB_CP1_RPTR_OFFSET) & 0xFFFFFFFC);
@@ -1232,7 +1233,6 @@ int cayman_cp_resume(struct radeon_device *rdev)
 	WREG32(CP_RB1_BASE, rdev->cp1.gpu_addr >> 8);
 
 	rdev->cp1.rptr = RREG32(CP_RB1_RPTR);
-	rdev->cp1.wptr = RREG32(CP_RB1_WPTR);
 
 	/* ring2 - compute only */
 	/* Set ring buffer size */
@@ -1245,7 +1245,8 @@ int cayman_cp_resume(struct radeon_device *rdev)
 
 	/* Initialize the ring buffer's read and write pointers */
 	WREG32(CP_RB2_CNTL, tmp | RB_RPTR_WR_ENA);
-	WREG32(CP_RB2_WPTR, 0);
+	rdev->cp2.wptr = 0;
+	WREG32(CP_RB2_WPTR, rdev->cp2.wptr);
 
 	/* set the wb address wether it's enabled or not */
 	WREG32(CP_RB2_RPTR_ADDR, (rdev->wb.gpu_addr + RADEON_WB_CP2_RPTR_OFFSET) & 0xFFFFFFFC);
@@ -1257,7 +1258,6 @@ int cayman_cp_resume(struct radeon_device *rdev)
 	WREG32(CP_RB2_BASE, rdev->cp2.gpu_addr >> 8);
 
 	rdev->cp2.rptr = RREG32(CP_RB2_RPTR);
-	rdev->cp2.wptr = RREG32(CP_RB2_WPTR);
 
 	/* start the rings */
 	cayman_cp_start(rdev);
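The same reset sequence repeats for each of Cayman's three rings (cp, cp1, cp2). A standalone sketch, with a mocked ring struct rather than the driver's types, of applying that pattern uniformly:

/* Standalone sketch (mocked types and registers, not the driver's API). */
#include <stdio.h>
#include <stdint.h>

struct ring {
	const char *name;
	uint32_t wptr_reg;	/* mocked write-pointer register */
	uint32_t cached_wptr;
};

static void reset_wptr(struct ring *r)
{
	r->cached_wptr = 0;		/* decide the value in software first */
	r->wptr_reg = r->cached_wptr;	/* then program the (mocked) register */
}

int main(void)
{
	struct ring rings[] = {
		{ "cp",  0xdead, 0xdead },	/* stale placeholder values */
		{ "cp1", 0xdead, 0xdead },
		{ "cp2", 0xdead, 0xdead },
	};

	for (unsigned i = 0; i < 3; i++) {
		reset_wptr(&rings[i]);
		printf("%s: wptr=0x%x\n", rings[i].name, rings[i].wptr_reg);
	}
	return 0;
}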
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
index f2204cb1ccdf..7fcdbbbf2979 100644
--- a/drivers/gpu/drm/radeon/r100.c
+++ b/drivers/gpu/drm/radeon/r100.c
@@ -721,11 +721,11 @@ void r100_fence_ring_emit(struct radeon_device *rdev,
 int r100_copy_blit(struct radeon_device *rdev,
 		   uint64_t src_offset,
 		   uint64_t dst_offset,
-		   unsigned num_pages,
+		   unsigned num_gpu_pages,
 		   struct radeon_fence *fence)
 {
 	uint32_t cur_pages;
-	uint32_t stride_bytes = PAGE_SIZE;
+	uint32_t stride_bytes = RADEON_GPU_PAGE_SIZE;
 	uint32_t pitch;
 	uint32_t stride_pixels;
 	unsigned ndw;
@@ -737,7 +737,7 @@ int r100_copy_blit(struct radeon_device *rdev,
 	/* radeon pitch is /64 */
 	pitch = stride_bytes / 64;
 	stride_pixels = stride_bytes / 4;
-	num_loops = DIV_ROUND_UP(num_pages, 8191);
+	num_loops = DIV_ROUND_UP(num_gpu_pages, 8191);
 
 	/* Ask for enough room for blit + flush + fence */
 	ndw = 64 + (10 * num_loops);
@@ -746,12 +746,12 @@ int r100_copy_blit(struct radeon_device *rdev,
 		DRM_ERROR("radeon: moving bo (%d) asking for %u dw.\n", r, ndw);
 		return -EINVAL;
 	}
-	while (num_pages > 0) {
-		cur_pages = num_pages;
+	while (num_gpu_pages > 0) {
+		cur_pages = num_gpu_pages;
 		if (cur_pages > 8191) {
 			cur_pages = 8191;
 		}
-		num_pages -= cur_pages;
+		num_gpu_pages -= cur_pages;
 
 		/* pages are in Y direction - height
 		   page width in X direction - width */
@@ -773,8 +773,8 @@ int r100_copy_blit(struct radeon_device *rdev,
 		radeon_ring_write(rdev, (0x1fff) | (0x1fff << 16));
 		radeon_ring_write(rdev, 0);
 		radeon_ring_write(rdev, (0x1fff) | (0x1fff << 16));
-		radeon_ring_write(rdev, num_pages);
-		radeon_ring_write(rdev, num_pages);
+		radeon_ring_write(rdev, num_gpu_pages);
+		radeon_ring_write(rdev, num_gpu_pages);
 		radeon_ring_write(rdev, cur_pages | (stride_pixels << 16));
 	}
 	radeon_ring_write(rdev, PACKET0(RADEON_DSTCACHE_CTLSTAT, 0));
@@ -990,7 +990,8 @@ int r100_cp_init(struct radeon_device *rdev, unsigned ring_size)
 	/* Force read & write ptr to 0 */
 	WREG32(RADEON_CP_RB_CNTL, tmp | RADEON_RB_RPTR_WR_ENA | RADEON_RB_NO_UPDATE);
 	WREG32(RADEON_CP_RB_RPTR_WR, 0);
-	WREG32(RADEON_CP_RB_WPTR, 0);
+	rdev->cp.wptr = 0;
+	WREG32(RADEON_CP_RB_WPTR, rdev->cp.wptr);
 
 	/* set the wb address whether it's enabled or not */
 	WREG32(R_00070C_CP_RB_RPTR_ADDR,
@@ -1007,9 +1008,6 @@ int r100_cp_init(struct radeon_device *rdev, unsigned ring_size)
 	WREG32(RADEON_CP_RB_CNTL, tmp);
 	udelay(10);
 	rdev->cp.rptr = RREG32(RADEON_CP_RB_RPTR);
-	rdev->cp.wptr = RREG32(RADEON_CP_RB_WPTR);
-	/* protect against crazy HW on resume */
-	rdev->cp.wptr &= rdev->cp.ptr_mask;
 	/* Set cp mode to bus mastering & enable cp*/
 	WREG32(RADEON_CP_CSQ_MODE,
 	       REG_SET(RADEON_INDIRECT2_START, indirect2_start) |
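In r100_copy_blit the stride is now one GPU page and each blit pass covers at most 8191 GPU pages, so the pass count is DIV_ROUND_UP(num_gpu_pages, 8191). A standalone sketch of that arithmetic with example values:

/* Standalone sketch of the blit split above: ceil-divide the page count. */
#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	unsigned num_gpu_pages = 20000;	/* example transfer size */
	unsigned num_loops = DIV_ROUND_UP(num_gpu_pages, 8191);

	printf("%u GPU pages -> %u blit passes\n", num_gpu_pages, num_loops);	/* 3 */
	return 0;
}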
diff --git a/drivers/gpu/drm/radeon/r200.c b/drivers/gpu/drm/radeon/r200.c
index f24058300413..a1f3ba063c2d 100644
--- a/drivers/gpu/drm/radeon/r200.c
+++ b/drivers/gpu/drm/radeon/r200.c
@@ -84,7 +84,7 @@ static int r200_get_vtx_size_0(uint32_t vtx_fmt_0)
 int r200_copy_dma(struct radeon_device *rdev,
 		  uint64_t src_offset,
 		  uint64_t dst_offset,
-		  unsigned num_pages,
+		  unsigned num_gpu_pages,
 		  struct radeon_fence *fence)
 {
 	uint32_t size;
@@ -93,7 +93,7 @@ int r200_copy_dma(struct radeon_device *rdev,
 	int r = 0;
 
 	/* radeon pitch is /64 */
-	size = num_pages << PAGE_SHIFT;
+	size = num_gpu_pages << RADEON_GPU_PAGE_SHIFT;
 	num_loops = DIV_ROUND_UP(size, 0x1FFFFF);
 	r = radeon_ring_lock(rdev, num_loops * 4 + 64);
 	if (r) {
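The r200 DMA path used to shift a GPU-page count by PAGE_SHIFT, which is only correct when CPU pages happen to be 4 KiB. A standalone sketch of the discrepancy on a kernel built with 64 KiB pages (PAGE_SHIFT of 16, as on some ppc64 configurations):

/* Standalone sketch: the same page count shifted by the two different shifts. */
#include <stdio.h>

int main(void)
{
	unsigned num_gpu_pages = 8;
	unsigned long with_cpu_shift = (unsigned long)num_gpu_pages << 16;	/* 64 KiB PAGE_SHIFT */
	unsigned long with_gpu_shift = (unsigned long)num_gpu_pages << 12;	/* RADEON_GPU_PAGE_SHIFT */

	printf("CPU shift: %lu bytes, GPU shift: %lu bytes\n",
	       with_cpu_shift, with_gpu_shift);	/* 524288 vs. 32768 */
	return 0;
}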
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index aa5571b73aa0..720dd99163f8 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -2209,7 +2209,8 @@ int r600_cp_resume(struct radeon_device *rdev)
 	/* Initialize the ring buffer's read and write pointers */
 	WREG32(CP_RB_CNTL, tmp | RB_RPTR_WR_ENA);
 	WREG32(CP_RB_RPTR_WR, 0);
-	WREG32(CP_RB_WPTR, 0);
+	rdev->cp.wptr = 0;
+	WREG32(CP_RB_WPTR, rdev->cp.wptr);
 
 	/* set the wb address whether it's enabled or not */
 	WREG32(CP_RB_RPTR_ADDR,
@@ -2231,7 +2232,6 @@ int r600_cp_resume(struct radeon_device *rdev)
 	WREG32(CP_DEBUG, (1 << 27) | (1 << 28));
 
 	rdev->cp.rptr = RREG32(CP_RB_RPTR);
-	rdev->cp.wptr = RREG32(CP_RB_WPTR);
 
 	r600_cp_start(rdev);
 	rdev->cp.ready = true;
@@ -2353,21 +2353,23 @@ void r600_fence_ring_emit(struct radeon_device *rdev,
 }
 
 int r600_copy_blit(struct radeon_device *rdev,
-		   uint64_t src_offset, uint64_t dst_offset,
-		   unsigned num_pages, struct radeon_fence *fence)
+		   uint64_t src_offset,
+		   uint64_t dst_offset,
+		   unsigned num_gpu_pages,
+		   struct radeon_fence *fence)
 {
 	int r;
 
 	mutex_lock(&rdev->r600_blit.mutex);
 	rdev->r600_blit.vb_ib = NULL;
-	r = r600_blit_prepare_copy(rdev, num_pages * RADEON_GPU_PAGE_SIZE);
+	r = r600_blit_prepare_copy(rdev, num_gpu_pages * RADEON_GPU_PAGE_SIZE);
 	if (r) {
 		if (rdev->r600_blit.vb_ib)
 			radeon_ib_free(rdev, &rdev->r600_blit.vb_ib);
 		mutex_unlock(&rdev->r600_blit.mutex);
 		return r;
 	}
-	r600_kms_blit_copy(rdev, src_offset, dst_offset, num_pages * RADEON_GPU_PAGE_SIZE);
+	r600_kms_blit_copy(rdev, src_offset, dst_offset, num_gpu_pages * RADEON_GPU_PAGE_SIZE);
 	r600_blit_done_copy(rdev, fence);
 	mutex_unlock(&rdev->r600_blit.mutex);
 	return 0;
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index 32807baf55e2..c1e056b35b29 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -322,6 +322,7 @@ union radeon_gart_table {
 
 #define RADEON_GPU_PAGE_SIZE 4096
 #define RADEON_GPU_PAGE_MASK (RADEON_GPU_PAGE_SIZE - 1)
+#define RADEON_GPU_PAGE_SHIFT 12
 
 struct radeon_gart {
 	dma_addr_t			table_addr;
@@ -914,17 +915,17 @@ struct radeon_asic {
 	int (*copy_blit)(struct radeon_device *rdev,
 			 uint64_t src_offset,
 			 uint64_t dst_offset,
-			 unsigned num_pages,
+			 unsigned num_gpu_pages,
 			 struct radeon_fence *fence);
 	int (*copy_dma)(struct radeon_device *rdev,
 			uint64_t src_offset,
 			uint64_t dst_offset,
-			unsigned num_pages,
+			unsigned num_gpu_pages,
 			struct radeon_fence *fence);
 	int (*copy)(struct radeon_device *rdev,
 		    uint64_t src_offset,
 		    uint64_t dst_offset,
-		    unsigned num_pages,
+		    unsigned num_gpu_pages,
 		    struct radeon_fence *fence);
 	uint32_t (*get_engine_clock)(struct radeon_device *rdev);
 	void (*set_engine_clock)(struct radeon_device *rdev, uint32_t eng_clock);
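The new RADEON_GPU_PAGE_SHIFT is only useful while it matches RADEON_GPU_PAGE_SIZE. A minimal C11 sketch, not in the patch, of that invariant expressed as a compile-time check:

/* Standalone C11 sketch: shift and size must describe the same page size. */
#include <assert.h>

#define RADEON_GPU_PAGE_SIZE  4096
#define RADEON_GPU_PAGE_SHIFT 12

static_assert(RADEON_GPU_PAGE_SIZE == (1u << RADEON_GPU_PAGE_SHIFT),
	      "GPU page size and shift disagree");

int main(void)
{
	return 0;
}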
diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h
index 3d7a0d7c6a9a..3dedaa07aac1 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.h
+++ b/drivers/gpu/drm/radeon/radeon_asic.h
@@ -75,7 +75,7 @@ uint32_t r100_pll_rreg(struct radeon_device *rdev, uint32_t reg);
 int r100_copy_blit(struct radeon_device *rdev,
 		   uint64_t src_offset,
 		   uint64_t dst_offset,
-		   unsigned num_pages,
+		   unsigned num_gpu_pages,
 		   struct radeon_fence *fence);
 int r100_set_surface_reg(struct radeon_device *rdev, int reg,
 			 uint32_t tiling_flags, uint32_t pitch,
@@ -143,7 +143,7 @@ extern void r100_post_page_flip(struct radeon_device *rdev, int crtc);
 extern int r200_copy_dma(struct radeon_device *rdev,
 			 uint64_t src_offset,
 			 uint64_t dst_offset,
-			 unsigned num_pages,
+			 unsigned num_gpu_pages,
 			 struct radeon_fence *fence);
 void r200_set_safe_registers(struct radeon_device *rdev);
 
@@ -311,7 +311,7 @@ void r600_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib);
 int r600_ring_test(struct radeon_device *rdev);
 int r600_copy_blit(struct radeon_device *rdev,
 		   uint64_t src_offset, uint64_t dst_offset,
-		   unsigned num_pages, struct radeon_fence *fence);
+		   unsigned num_gpu_pages, struct radeon_fence *fence);
 void r600_hpd_init(struct radeon_device *rdev);
 void r600_hpd_fini(struct radeon_device *rdev);
 bool r600_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd);
@@ -403,7 +403,7 @@ void evergreen_bandwidth_update(struct radeon_device *rdev);
 void evergreen_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib);
 int evergreen_copy_blit(struct radeon_device *rdev,
 			uint64_t src_offset, uint64_t dst_offset,
-			unsigned num_pages, struct radeon_fence *fence);
+			unsigned num_gpu_pages, struct radeon_fence *fence);
 void evergreen_hpd_init(struct radeon_device *rdev);
 void evergreen_hpd_fini(struct radeon_device *rdev);
 bool evergreen_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd);
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
index 6cc17fb96a57..6adb3e58affd 100644
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -473,8 +473,8 @@ pflip_cleanup:
 	spin_lock_irqsave(&dev->event_lock, flags);
 	radeon_crtc->unpin_work = NULL;
 unlock_free:
-	drm_gem_object_unreference_unlocked(old_radeon_fb->obj);
 	spin_unlock_irqrestore(&dev->event_lock, flags);
+	drm_gem_object_unreference_unlocked(old_radeon_fb->obj);
 	radeon_fence_unref(&work->fence);
 	kfree(work);
 
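drm_gem_object_unreference_unlocked may take a blocking lock when it drops the last reference, so it must not run inside the IRQ-disabled spin_lock_irqsave section; the hunk moves it after the unlock. A standalone pthread analogue (not the DRM API) of that ordering:

/* Standalone analogue: potentially-sleeping work goes after the spinlock. */
#include <pthread.h>
#include <stdio.h>

static pthread_spinlock_t event_lock;

static void release_object(void)	/* stand-in for the unreference call */
{
	printf("releasing object outside the critical section\n");
}

int main(void)
{
	pthread_spin_init(&event_lock, PTHREAD_PROCESS_PRIVATE);

	pthread_spin_lock(&event_lock);
	/* only touch the state guarded by the lock here */
	pthread_spin_unlock(&event_lock);

	release_object();	/* blocking work happens only after the unlock */

	pthread_spin_destroy(&event_lock);
	return 0;
}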
diff --git a/drivers/gpu/drm/radeon/radeon_encoders.c b/drivers/gpu/drm/radeon/radeon_encoders.c
index 319d85d7e759..13690f3eb4a4 100644
--- a/drivers/gpu/drm/radeon/radeon_encoders.c
+++ b/drivers/gpu/drm/radeon/radeon_encoders.c
@@ -1507,7 +1507,14 @@ radeon_atom_encoder_dpms(struct drm_encoder *encoder, int mode)
 	switch (mode) {
 	case DRM_MODE_DPMS_ON:
 		args.ucAction = ATOM_ENABLE;
-		atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+		/* workaround for DVOOutputControl on some RS690 systems */
+		if (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_DDI) {
+			u32 reg = RREG32(RADEON_BIOS_3_SCRATCH);
+			WREG32(RADEON_BIOS_3_SCRATCH, reg & ~ATOM_S3_DFP2I_ACTIVE);
+			atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+			WREG32(RADEON_BIOS_3_SCRATCH, reg);
+		} else
+			atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
 		if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
 			args.ucAction = ATOM_LCD_BLON;
 			atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
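The workaround saves the BIOS scratch register, clears the DFP2I-active bit while the AtomBIOS table runs, then restores the original value. A standalone sketch of that save/clear/restore pattern, with a mocked register and an illustrative bit position:

/* Standalone sketch (mocked register, illustrative bit), not driver code. */
#include <stdio.h>
#include <stdint.h>

#define DFP2I_ACTIVE	(1u << 8)	/* illustrative bit position only */

static uint32_t bios_3_scratch = 0xffffffff;	/* mocked scratch register */

static void run_atom_table(void)
{
	printf("table runs with scratch = 0x%08x\n", bios_3_scratch);
}

int main(void)
{
	uint32_t saved = bios_3_scratch;		/* save */

	bios_3_scratch = saved & ~DFP2I_ACTIVE;		/* clear the offending bit */
	run_atom_table();
	bios_3_scratch = saved;				/* restore */

	printf("scratch restored to 0x%08x\n", bios_3_scratch);
	return 0;
}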
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
index 9b86fb0e4122..0b5468bfaf54 100644
--- a/drivers/gpu/drm/radeon/radeon_ttm.c
+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
@@ -277,7 +277,12 @@ static int radeon_move_blit(struct ttm_buffer_object *bo,
 		DRM_ERROR("Trying to move memory with CP turned off.\n");
 		return -EINVAL;
 	}
-	r = radeon_copy(rdev, old_start, new_start, new_mem->num_pages, fence);
+
+	BUILD_BUG_ON((PAGE_SIZE % RADEON_GPU_PAGE_SIZE) != 0);
+
+	r = radeon_copy(rdev, old_start, new_start,
+			new_mem->num_pages * (PAGE_SIZE / RADEON_GPU_PAGE_SIZE), /* GPU pages */
+			fence);
 	/* FIXME: handle copy error */
 	r = ttm_bo_move_accel_cleanup(bo, (void *)fence, NULL,
 				      evict, no_wait_reserve, no_wait_gpu, new_mem);
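TTM counts buffers in CPU pages while the radeon copy hooks now expect GPU pages, hence the multiplication by PAGE_SIZE / RADEON_GPU_PAGE_SIZE (the BUILD_BUG_ON guarantees the division is exact). A standalone sketch of the conversion, using a 64 KiB CPU page so the factor is visible:

/* Standalone sketch: CPU pages to GPU pages, mirroring the call above. */
#include <stdio.h>

#define GPU_PAGE_SIZE	4096
#define CPU_PAGE_SIZE	65536	/* e.g. a 64 KiB-page kernel; 4096 on x86 */

int main(void)
{
	unsigned long num_cpu_pages = 4;	/* as reported by new_mem->num_pages */
	unsigned long num_gpu_pages =
		num_cpu_pages * (CPU_PAGE_SIZE / GPU_PAGE_SIZE);

	/* the compile-time check above ensures this ratio is a whole number */
	printf("%lu CPU pages -> %lu GPU pages\n", num_cpu_pages, num_gpu_pages);
	return 0;
}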
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index a4d38d85909a..ef06194c5aa6 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -394,7 +394,8 @@ static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
 
 	if (!(new_man->flags & TTM_MEMTYPE_FLAG_FIXED)) {
 		if (bo->ttm == NULL) {
-			ret = ttm_bo_add_ttm(bo, false);
+			bool zero = !(old_man->flags & TTM_MEMTYPE_FLAG_FIXED);
+			ret = ttm_bo_add_ttm(bo, zero);
 			if (ret)
 				goto out_err;
 		}
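The added flag asks ttm_bo_add_ttm for zero-filled pages unless the object is coming from a FIXED memory type (e.g. VRAM), presumably because in that case the pages are immediately overwritten by the copy. A standalone sketch of just that decision, with an illustrative flag value:

/* Standalone sketch of the new zero-fill decision above. */
#include <stdbool.h>
#include <stdio.h>

#define TTM_MEMTYPE_FLAG_FIXED	(1u << 0)	/* illustrative flag value */

int main(void)
{
	unsigned old_man_flags = TTM_MEMTYPE_FLAG_FIXED;	/* e.g. evicting from VRAM */
	bool zero = !(old_man_flags & TTM_MEMTYPE_FLAG_FIXED);

	printf("zero-fill new ttm pages: %s\n", zero ? "yes" : "no");
	return 0;
}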