Diffstat (limited to 'drivers/gpu/drm/radeon/radeon_device.c')
 drivers/gpu/drm/radeon/radeon_device.c | 42 +++++++++++++++++++-----------------------
 1 file changed, 19 insertions(+), 23 deletions(-)
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index 5992502a3448..066c98b888a5 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -193,7 +193,7 @@ int radeon_wb_init(struct radeon_device *rdev)
 
 	if (rdev->wb.wb_obj == NULL) {
 		r = radeon_bo_create(rdev, RADEON_GPU_PAGE_SIZE, PAGE_SIZE, true,
-				     RADEON_GEM_DOMAIN_GTT, &rdev->wb.wb_obj);
+				     RADEON_GEM_DOMAIN_GTT, NULL, &rdev->wb.wb_obj);
 		if (r) {
 			dev_warn(rdev->dev, "(%d) create WB bo failed\n", r);
 			return r;
@@ -225,9 +225,9 @@ int radeon_wb_init(struct radeon_device *rdev)
 	/* disable event_write fences */
 	rdev->wb.use_event = false;
 	/* disabled via module param */
-	if (radeon_no_wb == 1)
+	if (radeon_no_wb == 1) {
 		rdev->wb.enabled = false;
-	else {
+	} else {
 		if (rdev->flags & RADEON_IS_AGP) {
 			/* often unreliable on AGP */
 			rdev->wb.enabled = false;
@@ -237,8 +237,9 @@ int radeon_wb_init(struct radeon_device *rdev)
 		} else {
 			rdev->wb.enabled = true;
 			/* event_write fences are only available on r600+ */
-			if (rdev->family >= CHIP_R600)
+			if (rdev->family >= CHIP_R600) {
 				rdev->wb.use_event = true;
+			}
 		}
 	}
 	/* always use writeback/events on NI, APUs */
@@ -696,6 +697,11 @@ static bool radeon_switcheroo_can_switch(struct pci_dev *pdev)
 	return can_switch;
 }
 
+static const struct vga_switcheroo_client_ops radeon_switcheroo_ops = {
+	.set_gpu_state = radeon_switcheroo_set_state,
+	.reprobe = NULL,
+	.can_switch = radeon_switcheroo_can_switch,
+};
 
 int radeon_device_init(struct radeon_device *rdev,
 		       struct drm_device *ddev,
@@ -714,7 +720,6 @@ int radeon_device_init(struct radeon_device *rdev,
 	rdev->is_atom_bios = false;
 	rdev->usec_timeout = RADEON_MAX_USEC_TIMEOUT;
 	rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024;
-	rdev->gpu_lockup = false;
 	rdev->accel_working = false;
 
 	DRM_INFO("initializing kernel modesetting (%s 0x%04X:0x%04X 0x%04X:0x%04X).\n",
@@ -724,21 +729,18 @@ int radeon_device_init(struct radeon_device *rdev,
 	/* mutex initialization are all done here so we
 	 * can recall function without having locking issues */
 	radeon_mutex_init(&rdev->cs_mutex);
-	radeon_mutex_init(&rdev->ib_pool.mutex);
-	for (i = 0; i < RADEON_NUM_RINGS; ++i)
-		mutex_init(&rdev->ring[i].mutex);
+	mutex_init(&rdev->ring_lock);
 	mutex_init(&rdev->dc_hw_i2c_mutex);
 	if (rdev->family >= CHIP_R600)
 		spin_lock_init(&rdev->ih.lock);
 	mutex_init(&rdev->gem.mutex);
 	mutex_init(&rdev->pm.mutex);
 	mutex_init(&rdev->vram_mutex);
-	rwlock_init(&rdev->fence_lock);
-	rwlock_init(&rdev->semaphore_drv.lock);
-	INIT_LIST_HEAD(&rdev->gem.objects);
 	init_waitqueue_head(&rdev->irq.vblank_queue);
 	init_waitqueue_head(&rdev->irq.idle_queue);
-	INIT_LIST_HEAD(&rdev->semaphore_drv.bo);
+	r = radeon_gem_init(rdev);
+	if (r)
+		return r;
 	/* initialize vm here */
 	rdev->vm_manager.use_bitmap = 1;
 	rdev->vm_manager.max_pfn = 1 << 20;
@@ -814,10 +816,7 @@ int radeon_device_init(struct radeon_device *rdev,
 	/* this will fail for cards that aren't VGA class devices, just
 	 * ignore it */
 	vga_client_register(rdev->pdev, rdev, NULL, radeon_vga_set_decode);
-	vga_switcheroo_register_client(rdev->pdev,
-				       radeon_switcheroo_set_state,
-				       NULL,
-				       radeon_switcheroo_can_switch);
+	vga_switcheroo_register_client(rdev->pdev, &radeon_switcheroo_ops);
 
 	r = radeon_init(rdev);
 	if (r)
@@ -914,9 +913,12 @@ int radeon_suspend_kms(struct drm_device *dev, pm_message_t state)
 	}
 	/* evict vram memory */
 	radeon_bo_evict_vram(rdev);
+
+	mutex_lock(&rdev->ring_lock);
 	/* wait for gpu to finish processing current batch */
 	for (i = 0; i < RADEON_NUM_RINGS; i++)
-		radeon_fence_wait_last(rdev, i);
+		radeon_fence_wait_empty_locked(rdev, i);
+	mutex_unlock(&rdev->ring_lock);
 
 	radeon_save_bios_scratch_regs(rdev);
 
@@ -955,7 +957,6 @@ int radeon_resume_kms(struct drm_device *dev)
 		console_unlock();
 		return -1;
 	}
-	pci_set_master(dev->pdev);
 	/* resume AGP if in use */
 	radeon_agp_resume(rdev);
 	radeon_resume(rdev);
@@ -988,9 +989,6 @@ int radeon_gpu_reset(struct radeon_device *rdev)
 	int r;
 	int resched;
 
-	/* Prevent CS ioctl from interfering */
-	radeon_mutex_lock(&rdev->cs_mutex);
-
 	radeon_save_bios_scratch_regs(rdev);
 	/* block TTM */
 	resched = ttm_bo_lock_delayed_workqueue(&rdev->mman.bdev);
@@ -1005,8 +1003,6 @@ int radeon_gpu_reset(struct radeon_device *rdev)
 		ttm_bo_unlock_delayed_workqueue(&rdev->mman.bdev, resched);
 	}
 
-	radeon_mutex_unlock(&rdev->cs_mutex);
-
 	if (r) {
 		/* bad news, how to tell it to userspace ? */
 		dev_info(rdev->dev, "GPU reset failed\n");