diff options
author    Dave Airlie <airlied@redhat.com>  2018-10-04 18:39:31 -0400
committer Dave Airlie <airlied@redhat.com>  2018-10-04 18:39:35 -0400
commit    bdf800c6fdf5674999bc0228d5040cc0ae218fa8 (patch)
tree      223c6c6764fe1e3b40b3cca522eb0f11a8fdf69d
parent    3a9df1e9259362ed1ec321ce4229f1cd992355e6 (diff)
parent    11b29c9e25788d0afb2ddb67bcd89424bd25f2f7 (diff)
Merge branch 'drm-fixes-4.19' of git://people.freedesktop.org/~agd5f/linux into drm-fixes
- Fix an ordering issue in DC with respect to atomic flips that could result
in a crash
- Fix incorrect use of process->mm in KFD
Signed-off-by: Dave Airlie <airlied@redhat.com>
From: Alex Deucher <alexdeucher@gmail.com>
Link: https://patchwork.freedesktop.org/patch/msgid/1538668374-22334-1-git-send-email-alexander.deucher@amd.com
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c  | 37
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c      | 10
2 files changed, 37 insertions, 10 deletions
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
index ec0d62a16e53..4f22e745df51 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
@@ -358,8 +358,8 @@ static int create_compute_queue_nocpsch(struct device_queue_manager *dqm, | |||
358 | struct queue *q, | 358 | struct queue *q, |
359 | struct qcm_process_device *qpd) | 359 | struct qcm_process_device *qpd) |
360 | { | 360 | { |
361 | int retval; | ||
362 | struct mqd_manager *mqd_mgr; | 361 | struct mqd_manager *mqd_mgr; |
362 | int retval; | ||
363 | 363 | ||
364 | mqd_mgr = dqm->ops.get_mqd_manager(dqm, KFD_MQD_TYPE_COMPUTE); | 364 | mqd_mgr = dqm->ops.get_mqd_manager(dqm, KFD_MQD_TYPE_COMPUTE); |
365 | if (!mqd_mgr) | 365 | if (!mqd_mgr) |
@@ -387,8 +387,12 @@ static int create_compute_queue_nocpsch(struct device_queue_manager *dqm, | |||
387 | if (!q->properties.is_active) | 387 | if (!q->properties.is_active) |
388 | return 0; | 388 | return 0; |
389 | 389 | ||
390 | retval = mqd_mgr->load_mqd(mqd_mgr, q->mqd, q->pipe, q->queue, | 390 | if (WARN(q->process->mm != current->mm, |
391 | &q->properties, q->process->mm); | 391 | "should only run in user thread")) |
392 | retval = -EFAULT; | ||
393 | else | ||
394 | retval = mqd_mgr->load_mqd(mqd_mgr, q->mqd, q->pipe, q->queue, | ||
395 | &q->properties, current->mm); | ||
392 | if (retval) | 396 | if (retval) |
393 | goto out_uninit_mqd; | 397 | goto out_uninit_mqd; |
394 | 398 | ||
@@ -545,9 +549,15 @@ static int update_queue(struct device_queue_manager *dqm, struct queue *q) | |||
545 | retval = map_queues_cpsch(dqm); | 549 | retval = map_queues_cpsch(dqm); |
546 | else if (q->properties.is_active && | 550 | else if (q->properties.is_active && |
547 | (q->properties.type == KFD_QUEUE_TYPE_COMPUTE || | 551 | (q->properties.type == KFD_QUEUE_TYPE_COMPUTE || |
548 | q->properties.type == KFD_QUEUE_TYPE_SDMA)) | 552 | q->properties.type == KFD_QUEUE_TYPE_SDMA)) { |
549 | retval = mqd_mgr->load_mqd(mqd_mgr, q->mqd, q->pipe, q->queue, | 553 | if (WARN(q->process->mm != current->mm, |
550 | &q->properties, q->process->mm); | 554 | "should only run in user thread")) |
555 | retval = -EFAULT; | ||
556 | else | ||
557 | retval = mqd_mgr->load_mqd(mqd_mgr, q->mqd, | ||
558 | q->pipe, q->queue, | ||
559 | &q->properties, current->mm); | ||
560 | } | ||
551 | 561 | ||
552 | out_unlock: | 562 | out_unlock: |
553 | dqm_unlock(dqm); | 563 | dqm_unlock(dqm); |
@@ -653,6 +663,7 @@ out: | |||
653 | static int restore_process_queues_nocpsch(struct device_queue_manager *dqm, | 663 | static int restore_process_queues_nocpsch(struct device_queue_manager *dqm, |
654 | struct qcm_process_device *qpd) | 664 | struct qcm_process_device *qpd) |
655 | { | 665 | { |
666 | struct mm_struct *mm = NULL; | ||
656 | struct queue *q; | 667 | struct queue *q; |
657 | struct mqd_manager *mqd_mgr; | 668 | struct mqd_manager *mqd_mgr; |
658 | struct kfd_process_device *pdd; | 669 | struct kfd_process_device *pdd; |
@@ -686,6 +697,15 @@ static int restore_process_queues_nocpsch(struct device_queue_manager *dqm, | |||
686 | kfd_flush_tlb(pdd); | 697 | kfd_flush_tlb(pdd); |
687 | } | 698 | } |
688 | 699 | ||
700 | /* Take a safe reference to the mm_struct, which may otherwise | ||
701 | * disappear even while the kfd_process is still referenced. | ||
702 | */ | ||
703 | mm = get_task_mm(pdd->process->lead_thread); | ||
704 | if (!mm) { | ||
705 | retval = -EFAULT; | ||
706 | goto out; | ||
707 | } | ||
708 | |||
689 | /* activate all active queues on the qpd */ | 709 | /* activate all active queues on the qpd */ |
690 | list_for_each_entry(q, &qpd->queues_list, list) { | 710 | list_for_each_entry(q, &qpd->queues_list, list) { |
691 | if (!q->properties.is_evicted) | 711 | if (!q->properties.is_evicted) |
@@ -700,14 +720,15 @@ static int restore_process_queues_nocpsch(struct device_queue_manager *dqm, | |||
700 | q->properties.is_evicted = false; | 720 | q->properties.is_evicted = false; |
701 | q->properties.is_active = true; | 721 | q->properties.is_active = true; |
702 | retval = mqd_mgr->load_mqd(mqd_mgr, q->mqd, q->pipe, | 722 | retval = mqd_mgr->load_mqd(mqd_mgr, q->mqd, q->pipe, |
703 | q->queue, &q->properties, | 723 | q->queue, &q->properties, mm); |
704 | q->process->mm); | ||
705 | if (retval) | 724 | if (retval) |
706 | goto out; | 725 | goto out; |
707 | dqm->queue_count++; | 726 | dqm->queue_count++; |
708 | } | 727 | } |
709 | qpd->evicted = 0; | 728 | qpd->evicted = 0; |
710 | out: | 729 | out: |
730 | if (mm) | ||
731 | mmput(mm); | ||
711 | dqm_unlock(dqm); | 732 | dqm_unlock(dqm); |
712 | return retval; | 733 | return retval; |
713 | } | 734 | } |
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 96875950845a..6903fe6c894b 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -4633,12 +4633,18 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state) | |||
4633 | } | 4633 | } |
4634 | spin_unlock_irqrestore(&adev->ddev->event_lock, flags); | 4634 | spin_unlock_irqrestore(&adev->ddev->event_lock, flags); |
4635 | 4635 | ||
4636 | /* Signal HW programming completion */ | ||
4637 | drm_atomic_helper_commit_hw_done(state); | ||
4638 | 4636 | ||
4639 | if (wait_for_vblank) | 4637 | if (wait_for_vblank) |
4640 | drm_atomic_helper_wait_for_flip_done(dev, state); | 4638 | drm_atomic_helper_wait_for_flip_done(dev, state); |
4641 | 4639 | ||
4640 | /* | ||
4641 | * FIXME: | ||
4642 | * Delay hw_done() until flip_done() is signaled. This is to block | ||
4643 | * another commit from freeing the CRTC state while we're still | ||
4644 | * waiting on flip_done. | ||
4645 | */ | ||
4646 | drm_atomic_helper_commit_hw_done(state); | ||
4647 | |||
4642 | drm_atomic_helper_cleanup_planes(dev, state); | 4648 | drm_atomic_helper_cleanup_planes(dev, state); |
4643 | 4649 | ||
4644 | /* Finally, drop a runtime PM reference for each newly disabled CRTC, | 4650 | /* Finally, drop a runtime PM reference for each newly disabled CRTC, |