author    Linus Torvalds <torvalds@linux-foundation.org>    2013-06-21 12:33:06 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>    2013-06-21 12:33:06 -0400
commit    e61cd5e2e36746e5a4e791b9232aed39bce52ded (patch)
tree      9a9ba4b291698f94e00aaedbe6e4c551d7a51f5a
parent    64a2f30a89a3c26a5152b09f4d390b9d91cab0cc (diff)
parent    9aa36876ddeb85dfb0bcf37be06bbdc62e954f16 (diff)
Merge branch 'drm-fixes' of git://people.freedesktop.org/~airlied/linux
Pull drm radeon fixes from Dave Airlie:
 "One core fix, but mostly radeon fixes for s/r and big endian UVD
  support, and a fix to stop the GPU being reset for no good reason,
  and crashing people's machines."

* 'drm-fixes' of git://people.freedesktop.org/~airlied/linux:
  drm/radeon: update lockup tracking when scheduling in empty ring
  drm/prime: Honor requested file flags when exporting a buffer
  drm/radeon: fix UVD on big endian
  drm/radeon: fix write back suspend regression with uvd v2
  drm/radeon: do not try to uselessly update virtual memory pagetable
-rw-r--r--  drivers/gpu/drm/drm_prime.c              3
-rw-r--r--  drivers/gpu/drm/radeon/r600.c           13
-rw-r--r--  drivers/gpu/drm/radeon/radeon_device.c  53
-rw-r--r--  drivers/gpu/drm/radeon/radeon_fence.c   10
-rw-r--r--  drivers/gpu/drm/radeon/radeon_gart.c     6
-rw-r--r--  drivers/gpu/drm/radeon/radeon_ring.c     7
-rw-r--r--  drivers/gpu/drm/radeon/radeon_uvd.c     48
7 files changed, 85 insertions, 55 deletions
diff --git a/drivers/gpu/drm/drm_prime.c b/drivers/gpu/drm/drm_prime.c
index dcde35231e25..5b7b9110254b 100644
--- a/drivers/gpu/drm/drm_prime.c
+++ b/drivers/gpu/drm/drm_prime.c
@@ -190,8 +190,7 @@ struct dma_buf *drm_gem_prime_export(struct drm_device *dev,
 		if (ret)
 			return ERR_PTR(ret);
 	}
-	return dma_buf_export(obj, &drm_gem_prime_dmabuf_ops, obj->size,
-			      0600);
+	return dma_buf_export(obj, &drm_gem_prime_dmabuf_ops, obj->size, flags);
 }
 EXPORT_SYMBOL(drm_gem_prime_export);
 
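The fix above forwards the caller's flags instead of a hardcoded 0600 mode. As a minimal userspace sketch (not part of the patch) of where those flags originate, using libdrm's drmPrimeHandleToFD(), which wraps DRM_IOCTL_PRIME_HANDLE_TO_FD:

	/* Sketch only: export a GEM handle as a dma-buf fd, requesting
	 * close-on-exec; after this fix the kernel honors such flags when
	 * creating the dma-buf file instead of ignoring them. */
	#include <stdint.h>
	#include <xf86drm.h>

	int export_bo(int drm_fd, uint32_t gem_handle)
	{
		int prime_fd = -1;

		if (drmPrimeHandleToFD(drm_fd, gem_handle, DRM_CLOEXEC, &prime_fd))
			return -1;	/* export failed */
		return prime_fd;	/* shareable with other processes */
	}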
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index 0e5341695922..6948eb88c2b7 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -2687,6 +2687,9 @@ void r600_uvd_rbc_stop(struct radeon_device *rdev)
 int r600_uvd_init(struct radeon_device *rdev)
 {
 	int i, j, r;
+	/* disable byte swapping */
+	u32 lmi_swap_cntl = 0;
+	u32 mp_swap_cntl = 0;
 
 	/* raise clocks while booting up the VCPU */
 	radeon_set_uvd_clocks(rdev, 53300, 40000);
@@ -2711,9 +2714,13 @@ int r600_uvd_init(struct radeon_device *rdev)
 	WREG32(UVD_LMI_CTRL, 0x40 | (1 << 8) | (1 << 13) |
 	       (1 << 21) | (1 << 9) | (1 << 20));
 
-	/* disable byte swapping */
-	WREG32(UVD_LMI_SWAP_CNTL, 0);
-	WREG32(UVD_MP_SWAP_CNTL, 0);
+#ifdef __BIG_ENDIAN
+	/* swap (8 in 32) RB and IB */
+	lmi_swap_cntl = 0xa;
+	mp_swap_cntl = 0;
+#endif
+	WREG32(UVD_LMI_SWAP_CNTL, lmi_swap_cntl);
+	WREG32(UVD_MP_SWAP_CNTL, mp_swap_cntl);
 
 	WREG32(UVD_MPC_SET_MUXA0, 0x40c2040);
 	WREG32(UVD_MPC_SET_MUXA1, 0x0);
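For context, the "(8 in 32)" swap selected by lmi_swap_cntl = 0xa byte-swaps each 32-bit word as the UVD block fetches ring and IB data on big-endian hosts. A standalone illustration (not driver code) of the equivalent per-dword transform:

	#include <stdint.h>

	/* Byte-swap one dword: what "swap 8 in 32" amounts to in hardware,
	 * and what the CPU would otherwise do per word with swab32(). */
	static inline uint32_t swap_8in32(uint32_t v)
	{
		return ((v & 0x000000ffu) << 24) |
		       ((v & 0x0000ff00u) <<  8) |
		       ((v & 0x00ff0000u) >>  8) |
		       ((v & 0xff000000u) >> 24);
	}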
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index 189973836cff..b0dc0b6cb4e0 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -244,16 +244,6 @@ void radeon_scratch_free(struct radeon_device *rdev, uint32_t reg)
  */
 void radeon_wb_disable(struct radeon_device *rdev)
 {
-	int r;
-
-	if (rdev->wb.wb_obj) {
-		r = radeon_bo_reserve(rdev->wb.wb_obj, false);
-		if (unlikely(r != 0))
-			return;
-		radeon_bo_kunmap(rdev->wb.wb_obj);
-		radeon_bo_unpin(rdev->wb.wb_obj);
-		radeon_bo_unreserve(rdev->wb.wb_obj);
-	}
 	rdev->wb.enabled = false;
 }
 
@@ -269,6 +259,11 @@ void radeon_wb_fini(struct radeon_device *rdev)
 {
 	radeon_wb_disable(rdev);
 	if (rdev->wb.wb_obj) {
+		if (!radeon_bo_reserve(rdev->wb.wb_obj, false)) {
+			radeon_bo_kunmap(rdev->wb.wb_obj);
+			radeon_bo_unpin(rdev->wb.wb_obj);
+			radeon_bo_unreserve(rdev->wb.wb_obj);
+		}
 		radeon_bo_unref(&rdev->wb.wb_obj);
 		rdev->wb.wb = NULL;
 		rdev->wb.wb_obj = NULL;
@@ -295,26 +290,26 @@ int radeon_wb_init(struct radeon_device *rdev)
 			dev_warn(rdev->dev, "(%d) create WB bo failed\n", r);
 			return r;
 		}
-	}
-	r = radeon_bo_reserve(rdev->wb.wb_obj, false);
-	if (unlikely(r != 0)) {
-		radeon_wb_fini(rdev);
-		return r;
-	}
-	r = radeon_bo_pin(rdev->wb.wb_obj, RADEON_GEM_DOMAIN_GTT,
-			  &rdev->wb.gpu_addr);
-	if (r) {
+		r = radeon_bo_reserve(rdev->wb.wb_obj, false);
+		if (unlikely(r != 0)) {
+			radeon_wb_fini(rdev);
+			return r;
+		}
+		r = radeon_bo_pin(rdev->wb.wb_obj, RADEON_GEM_DOMAIN_GTT,
+				  &rdev->wb.gpu_addr);
+		if (r) {
+			radeon_bo_unreserve(rdev->wb.wb_obj);
+			dev_warn(rdev->dev, "(%d) pin WB bo failed\n", r);
+			radeon_wb_fini(rdev);
+			return r;
+		}
+		r = radeon_bo_kmap(rdev->wb.wb_obj, (void **)&rdev->wb.wb);
 		radeon_bo_unreserve(rdev->wb.wb_obj);
-		dev_warn(rdev->dev, "(%d) pin WB bo failed\n", r);
-		radeon_wb_fini(rdev);
-		return r;
-	}
-	r = radeon_bo_kmap(rdev->wb.wb_obj, (void **)&rdev->wb.wb);
-	radeon_bo_unreserve(rdev->wb.wb_obj);
-	if (r) {
-		dev_warn(rdev->dev, "(%d) map WB bo failed\n", r);
-		radeon_wb_fini(rdev);
-		return r;
+		if (r) {
+			dev_warn(rdev->dev, "(%d) map WB bo failed\n", r);
+			radeon_wb_fini(rdev);
+			return r;
+		}
 	}
 
 	/* clear wb memory */
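Net effect of the three hunks above: the write-back buffer is reserved, pinned and mapped only once, when the BO is first created, and radeon_wb_disable() no longer unpins it, so the mapping survives suspend/resume. A toy model of the init-once pattern (create_bo(), pin_and_map() and destroy_bo() are hypothetical stand-ins, not driver APIs):

	#include <stddef.h>

	extern void *create_bo(void);		/* hypothetical */
	extern void *pin_and_map(void *bo);	/* hypothetical */
	extern void  destroy_bo(void *bo);	/* hypothetical */

	struct wb {
		void *obj;	/* stands in for rdev->wb.wb_obj */
		void *cpu_map;	/* stands in for rdev->wb.wb */
	};

	int wb_init(struct wb *wb)
	{
		if (wb->obj == NULL) {
			wb->obj = create_bo();
			if (wb->obj == NULL)
				return -1;
			wb->cpu_map = pin_and_map(wb->obj);
			if (wb->cpu_map == NULL) {
				destroy_bo(wb->obj);
				wb->obj = NULL;
				return -1;
			}
		}
		/* later calls (e.g. on resume) reuse the existing pin + map */
		return 0;
	}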
diff --git a/drivers/gpu/drm/radeon/radeon_fence.c b/drivers/gpu/drm/radeon/radeon_fence.c
index 5b937dfe6f65..ddb8f8e04eb5 100644
--- a/drivers/gpu/drm/radeon/radeon_fence.c
+++ b/drivers/gpu/drm/radeon/radeon_fence.c
@@ -63,7 +63,9 @@ static void radeon_fence_write(struct radeon_device *rdev, u32 seq, int ring)
 {
 	struct radeon_fence_driver *drv = &rdev->fence_drv[ring];
 	if (likely(rdev->wb.enabled || !drv->scratch_reg)) {
-		*drv->cpu_addr = cpu_to_le32(seq);
+		if (drv->cpu_addr) {
+			*drv->cpu_addr = cpu_to_le32(seq);
+		}
 	} else {
 		WREG32(drv->scratch_reg, seq);
 	}
@@ -84,7 +86,11 @@ static u32 radeon_fence_read(struct radeon_device *rdev, int ring)
 	u32 seq = 0;
 
 	if (likely(rdev->wb.enabled || !drv->scratch_reg)) {
-		seq = le32_to_cpu(*drv->cpu_addr);
+		if (drv->cpu_addr) {
+			seq = le32_to_cpu(*drv->cpu_addr);
+		} else {
+			seq = lower_32_bits(atomic64_read(&drv->last_seq));
+		}
 	} else {
 		seq = RREG32(drv->scratch_reg);
 	}
diff --git a/drivers/gpu/drm/radeon/radeon_gart.c b/drivers/gpu/drm/radeon/radeon_gart.c
index 2c1341f63dc5..43ec4a401f07 100644
--- a/drivers/gpu/drm/radeon/radeon_gart.c
+++ b/drivers/gpu/drm/radeon/radeon_gart.c
@@ -1197,11 +1197,13 @@ int radeon_vm_bo_update_pte(struct radeon_device *rdev,
 int radeon_vm_bo_rmv(struct radeon_device *rdev,
 		     struct radeon_bo_va *bo_va)
 {
-	int r;
+	int r = 0;
 
 	mutex_lock(&rdev->vm_manager.lock);
 	mutex_lock(&bo_va->vm->mutex);
-	r = radeon_vm_bo_update_pte(rdev, bo_va->vm, bo_va->bo, NULL);
+	if (bo_va->soffset) {
+		r = radeon_vm_bo_update_pte(rdev, bo_va->vm, bo_va->bo, NULL);
+	}
 	mutex_unlock(&rdev->vm_manager.lock);
 	list_del(&bo_va->vm_list);
 	mutex_unlock(&bo_va->vm->mutex);
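A bo_va whose soffset is zero was never actually placed in the VM address space, so there are no pagetable entries to clear on removal. A toy illustration of the guard (clear_pagetable_range() is a hypothetical stand-in for the update-with-NULL call):

	#include <stdint.h>

	struct mapping {
		uint64_t soffset;	/* 0 = never placed in the VM */
	};

	extern int clear_pagetable_range(struct mapping *m);	/* hypothetical */

	int mapping_remove(struct mapping *m)
	{
		int r = 0;

		if (m->soffset)		/* skip the useless pagetable update */
			r = clear_pagetable_range(m);
		return r;
	}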
diff --git a/drivers/gpu/drm/radeon/radeon_ring.c b/drivers/gpu/drm/radeon/radeon_ring.c
index e17faa7cf732..82434018cbe8 100644
--- a/drivers/gpu/drm/radeon/radeon_ring.c
+++ b/drivers/gpu/drm/radeon/radeon_ring.c
@@ -402,6 +402,13 @@ int radeon_ring_alloc(struct radeon_device *rdev, struct radeon_ring *ring, unsi
 		return -ENOMEM;
 	/* Align requested size with padding so unlock_commit can
 	 * pad safely */
+	radeon_ring_free_size(rdev, ring);
+	if (ring->ring_free_dw == (ring->ring_size / 4)) {
+		/* This is an empty ring update lockup info to avoid
+		 * false positive.
+		 */
+		radeon_ring_lockup_update(ring);
+	}
 	ndw = (ndw + ring->align_mask) & ~ring->align_mask;
 	while (ndw > (ring->ring_free_dw - 1)) {
 		radeon_ring_free_size(rdev, ring);
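The idea: a completely empty ring means any recorded lockup timestamp predates the work about to be scheduled, so it is refreshed first; otherwise the watchdog could compare new fences against a stale timestamp and trigger a spurious GPU reset. A simplified standalone sketch of that check (note the driver compares ring_free_dw against ring_size / 4 because ring_size is in bytes and free space in dwords; the toy below uses dwords for both):

	#include <time.h>

	struct toy_ring {
		unsigned size_dw;	/* total size, in dwords */
		unsigned free_dw;	/* currently free, in dwords */
		time_t   last_activity;	/* what the lockup check reads */
	};

	void ring_alloc_prologue(struct toy_ring *ring)
	{
		/* all dwords free == ring idle: treat the upcoming submit
		 * as fresh activity so no false lockup is reported */
		if (ring->free_dw == ring->size_dw)
			ring->last_activity = time(NULL);
	}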
diff --git a/drivers/gpu/drm/radeon/radeon_uvd.c b/drivers/gpu/drm/radeon/radeon_uvd.c
index 906e5c0ca3b9..cad735dd02c6 100644
--- a/drivers/gpu/drm/radeon/radeon_uvd.c
+++ b/drivers/gpu/drm/radeon/radeon_uvd.c
@@ -159,7 +159,17 @@ int radeon_uvd_suspend(struct radeon_device *rdev)
 	if (!r) {
 		radeon_bo_kunmap(rdev->uvd.vcpu_bo);
 		radeon_bo_unpin(rdev->uvd.vcpu_bo);
+		rdev->uvd.cpu_addr = NULL;
+		if (!radeon_bo_pin(rdev->uvd.vcpu_bo, RADEON_GEM_DOMAIN_CPU, NULL)) {
+			radeon_bo_kmap(rdev->uvd.vcpu_bo, &rdev->uvd.cpu_addr);
+		}
 		radeon_bo_unreserve(rdev->uvd.vcpu_bo);
+
+		if (rdev->uvd.cpu_addr) {
+			radeon_fence_driver_start_ring(rdev, R600_RING_TYPE_UVD_INDEX);
+		} else {
+			rdev->fence_drv[R600_RING_TYPE_UVD_INDEX].cpu_addr = NULL;
+		}
 	}
 	return r;
 }
@@ -178,6 +188,10 @@ int radeon_uvd_resume(struct radeon_device *rdev)
 		return r;
 	}
 
+	/* Have been pin in cpu unmap unpin */
+	radeon_bo_kunmap(rdev->uvd.vcpu_bo);
+	radeon_bo_unpin(rdev->uvd.vcpu_bo);
+
 	r = radeon_bo_pin(rdev->uvd.vcpu_bo, RADEON_GEM_DOMAIN_VRAM,
 			  &rdev->uvd.gpu_addr);
 	if (r) {
@@ -613,19 +627,19 @@ int radeon_uvd_get_create_msg(struct radeon_device *rdev, int ring,
 	}
 
 	/* stitch together an UVD create msg */
-	msg[0] = 0x00000de4;
-	msg[1] = 0x00000000;
-	msg[2] = handle;
-	msg[3] = 0x00000000;
-	msg[4] = 0x00000000;
-	msg[5] = 0x00000000;
-	msg[6] = 0x00000000;
-	msg[7] = 0x00000780;
-	msg[8] = 0x00000440;
-	msg[9] = 0x00000000;
-	msg[10] = 0x01b37000;
+	msg[0] = cpu_to_le32(0x00000de4);
+	msg[1] = cpu_to_le32(0x00000000);
+	msg[2] = cpu_to_le32(handle);
+	msg[3] = cpu_to_le32(0x00000000);
+	msg[4] = cpu_to_le32(0x00000000);
+	msg[5] = cpu_to_le32(0x00000000);
+	msg[6] = cpu_to_le32(0x00000000);
+	msg[7] = cpu_to_le32(0x00000780);
+	msg[8] = cpu_to_le32(0x00000440);
+	msg[9] = cpu_to_le32(0x00000000);
+	msg[10] = cpu_to_le32(0x01b37000);
 	for (i = 11; i < 1024; ++i)
-		msg[i] = 0x0;
+		msg[i] = cpu_to_le32(0x0);
 
 	radeon_bo_kunmap(bo);
 	radeon_bo_unreserve(bo);
@@ -659,12 +673,12 @@ int radeon_uvd_get_destroy_msg(struct radeon_device *rdev, int ring,
 	}
 
 	/* stitch together an UVD destroy msg */
-	msg[0] = 0x00000de4;
-	msg[1] = 0x00000002;
-	msg[2] = handle;
-	msg[3] = 0x00000000;
+	msg[0] = cpu_to_le32(0x00000de4);
+	msg[1] = cpu_to_le32(0x00000002);
+	msg[2] = cpu_to_le32(handle);
+	msg[3] = cpu_to_le32(0x00000000);
 	for (i = 4; i < 1024; ++i)
-		msg[i] = 0x0;
+		msg[i] = cpu_to_le32(0x0);
 
 	radeon_bo_kunmap(bo);
 	radeon_bo_unreserve(bo);
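UVD parses these messages as little-endian 32-bit words, so every word the CPU writes must go through cpu_to_le32(): a no-op on little-endian hosts, a byte swap on big-endian ones. A standalone equivalent for illustration (the kernel macro remains the authoritative interface):

	#include <stdint.h>

	static inline uint32_t to_le32(uint32_t v)
	{
	#if defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
		return __builtin_bswap32(v);	/* big endian: swap bytes */
	#else
		return v;			/* little endian: no-op */
	#endif
	}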