-rw-r--r--  drivers/gpu/drm/drm_crtc_helper.c       | 17
-rw-r--r--  drivers/gpu/drm/radeon/atombios_crtc.c  |  6
-rw-r--r--  drivers/gpu/drm/radeon/radeon_asic.c    |  4
-rw-r--r--  drivers/gpu/drm/radeon/radeon_device.c  |  4
-rw-r--r--  drivers/gpu/drm/radeon/radeon_pm.c      |  1
-rw-r--r--  drivers/gpu/drm/radeon/radeon_vm.c      | 11
6 files changed, 31 insertions(+), 12 deletions(-)
diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
index df281b54db01..872ba11c4533 100644
--- a/drivers/gpu/drm/drm_crtc_helper.c
+++ b/drivers/gpu/drm/drm_crtc_helper.c
@@ -29,6 +29,7 @@
  * Jesse Barnes <jesse.barnes@intel.com>
  */
 
+#include <linux/kernel.h>
 #include <linux/export.h>
 #include <linux/moduleparam.h>
 
@@ -88,7 +89,13 @@ bool drm_helper_encoder_in_use(struct drm_encoder *encoder)
 	struct drm_connector *connector;
 	struct drm_device *dev = encoder->dev;
 
-	WARN_ON(!mutex_is_locked(&dev->mode_config.mutex));
+	/*
+	 * We can expect this mutex to be locked if we are not panicking.
+	 * Locking is currently fubar in the panic handler.
+	 */
+	if (!oops_in_progress)
+		WARN_ON(!mutex_is_locked(&dev->mode_config.mutex));
+
 	list_for_each_entry(connector, &dev->mode_config.connector_list, head)
 		if (connector->encoder == encoder)
 			return true;
@@ -112,7 +119,13 @@ bool drm_helper_crtc_in_use(struct drm_crtc *crtc)
 	struct drm_encoder *encoder;
 	struct drm_device *dev = crtc->dev;
 
-	WARN_ON(!mutex_is_locked(&dev->mode_config.mutex));
+	/*
+	 * We can expect this mutex to be locked if we are not panicking.
+	 * Locking is currently fubar in the panic handler.
+	 */
+	if (!oops_in_progress)
+		WARN_ON(!mutex_is_locked(&dev->mode_config.mutex));
+
 	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head)
 		if (encoder->crtc == crtc && drm_helper_encoder_in_use(encoder))
 			return true;
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
index c31c12b4e666..e911898348f8 100644
--- a/drivers/gpu/drm/radeon/atombios_crtc.c
+++ b/drivers/gpu/drm/radeon/atombios_crtc.c
@@ -270,8 +270,6 @@ void atombios_crtc_dpms(struct drm_crtc *crtc, int mode)
 	switch (mode) {
 	case DRM_MODE_DPMS_ON:
 		radeon_crtc->enabled = true;
-		/* adjust pm to dpms changes BEFORE enabling crtcs */
-		radeon_pm_compute_clocks(rdev);
 		atombios_enable_crtc(crtc, ATOM_ENABLE);
 		if (ASIC_IS_DCE3(rdev) && !ASIC_IS_DCE6(rdev))
 			atombios_enable_crtc_memreq(crtc, ATOM_ENABLE);
@@ -289,10 +287,10 @@ void atombios_crtc_dpms(struct drm_crtc *crtc, int mode)
 			atombios_enable_crtc_memreq(crtc, ATOM_DISABLE);
 		atombios_enable_crtc(crtc, ATOM_DISABLE);
 		radeon_crtc->enabled = false;
-		/* adjust pm to dpms changes AFTER disabling crtcs */
-		radeon_pm_compute_clocks(rdev);
 		break;
 	}
+	/* adjust pm to dpms */
+	radeon_pm_compute_clocks(rdev);
 }
 
 static void
diff --git a/drivers/gpu/drm/radeon/radeon_asic.c b/drivers/gpu/drm/radeon/radeon_asic.c
index be20e62dac83..e5f0177bea1e 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.c
+++ b/drivers/gpu/drm/radeon/radeon_asic.c
@@ -2049,8 +2049,8 @@ static struct radeon_asic ci_asic = {
 		.blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
 		.dma = &cik_copy_dma,
 		.dma_ring_index = R600_RING_TYPE_DMA_INDEX,
-		.copy = &cik_copy_dma,
-		.copy_ring_index = R600_RING_TYPE_DMA_INDEX,
+		.copy = &cik_copy_cpdma,
+		.copy_ring_index = RADEON_RING_TYPE_GFX_INDEX,
 	},
 	.surface = {
 		.set_reg = r600_set_surface_reg,
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index 14671406212f..2cd144c378d6 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -1558,6 +1558,10 @@ int radeon_resume_kms(struct drm_device *dev, bool resume, bool fbcon)
 
 	drm_kms_helper_poll_enable(dev);
 
+	/* set the power state here in case we are a PX system or headless */
+	if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled)
+		radeon_pm_compute_clocks(rdev);
+
 	if (fbcon) {
 		radeon_fbdev_set_suspend(rdev, 0);
 		console_unlock();
diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c
index 53d6e1bb48dc..2bdae61c0ac0 100644
--- a/drivers/gpu/drm/radeon/radeon_pm.c
+++ b/drivers/gpu/drm/radeon/radeon_pm.c
@@ -1104,7 +1104,6 @@ static void radeon_pm_resume_dpm(struct radeon_device *rdev)
 	if (ret)
 		goto dpm_resume_fail;
 	rdev->pm.dpm_enabled = true;
-	radeon_pm_compute_clocks(rdev);
 	return;
 
 dpm_resume_fail:
diff --git a/drivers/gpu/drm/radeon/radeon_vm.c b/drivers/gpu/drm/radeon/radeon_vm.c
index 1f426696de36..c11b71d249e3 100644
--- a/drivers/gpu/drm/radeon/radeon_vm.c
+++ b/drivers/gpu/drm/radeon/radeon_vm.c
@@ -132,7 +132,7 @@ struct radeon_cs_reloc *radeon_vm_get_bos(struct radeon_device *rdev,
 	struct radeon_cs_reloc *list;
 	unsigned i, idx;
 
-	list = kmalloc_array(vm->max_pde_used + 1,
+	list = kmalloc_array(vm->max_pde_used + 2,
 			     sizeof(struct radeon_cs_reloc), GFP_KERNEL);
 	if (!list)
 		return NULL;
@@ -585,7 +585,8 @@ int radeon_vm_update_page_directory(struct radeon_device *rdev,
 {
 	static const uint32_t incr = RADEON_VM_PTE_COUNT * 8;
 
-	uint64_t pd_addr = radeon_bo_gpu_offset(vm->page_directory);
+	struct radeon_bo *pd = vm->page_directory;
+	uint64_t pd_addr = radeon_bo_gpu_offset(pd);
 	uint64_t last_pde = ~0, last_pt = ~0;
 	unsigned count = 0, pt_idx, ndw;
 	struct radeon_ib ib;
@@ -642,6 +643,7 @@ int radeon_vm_update_page_directory(struct radeon_device *rdev,
 					incr, R600_PTE_VALID);
 
 	if (ib.length_dw != 0) {
+		radeon_semaphore_sync_to(ib.semaphore, pd->tbo.sync_obj);
 		radeon_semaphore_sync_to(ib.semaphore, vm->last_id_use);
 		r = radeon_ib_schedule(rdev, &ib, NULL);
 		if (r) {
@@ -689,15 +691,18 @@ static void radeon_vm_update_ptes(struct radeon_device *rdev,
 	/* walk over the address space and update the page tables */
 	for (addr = start; addr < end; ) {
 		uint64_t pt_idx = addr >> RADEON_VM_BLOCK_SIZE;
+		struct radeon_bo *pt = vm->page_tables[pt_idx].bo;
 		unsigned nptes;
 		uint64_t pte;
 
+		radeon_semaphore_sync_to(ib->semaphore, pt->tbo.sync_obj);
+
 		if ((addr & ~mask) == (end & ~mask))
 			nptes = end - addr;
 		else
 			nptes = RADEON_VM_PTE_COUNT - (addr & mask);
 
-		pte = radeon_bo_gpu_offset(vm->page_tables[pt_idx].bo);
+		pte = radeon_bo_gpu_offset(pt);
 		pte += (addr & mask) * 8;
 
 		if ((last_pte + 8 * count) != pte) {