diff options
author | Yong Zhao <yong.zhao@amd.com> | 2017-10-08 07:57:52 -0400 |
---|---|---|
committer | Oded Gabbay <oded.gabbay@gmail.com> | 2017-10-08 07:57:52 -0400 |
commit | 4465f466c76774d3b5866929524cce6dd2d4dfb1 (patch) | |
tree | 54d1fa3575204b4c39ea8956bba76641d6bc2f66 | |
parent | ac30c78384885b209324dacc7b65bd8e9cc69fbf (diff) |
drm/amdkfd: Pass filter params to unmap_queues_cpsch
Signed-off-by: Yong Zhao <yong.zhao@amd.com>
Signed-off-by: Felix Kuehling <Felix.Kuehling@amd.com>
Reviewed-by: Oded Gabbay <oded.gabbay@gmail.com>
Signed-off-by: Oded Gabbay <oded.gabbay@gmail.com>
-rw-r--r-- | drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c | 20 |
1 file changed, 9 insertions, 11 deletions
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c index 0f9c39ba548a..be925a49dd10 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c | |||
@@ -46,7 +46,8 @@ static int create_compute_queue_nocpsch(struct device_queue_manager *dqm, | |||
46 | 46 | ||
47 | static int execute_queues_cpsch(struct device_queue_manager *dqm); | 47 | static int execute_queues_cpsch(struct device_queue_manager *dqm); |
48 | static int unmap_queues_cpsch(struct device_queue_manager *dqm, | 48 | static int unmap_queues_cpsch(struct device_queue_manager *dqm, |
49 | bool static_queues_included); | 49 | enum kfd_unmap_queues_filter filter, |
50 | uint32_t filter_param); | ||
50 | 51 | ||
51 | static int create_sdma_queue_nocpsch(struct device_queue_manager *dqm, | 52 | static int create_sdma_queue_nocpsch(struct device_queue_manager *dqm, |
52 | struct queue *q, | 53 | struct queue *q, |
@@ -710,7 +711,7 @@ fail_packet_manager_init: | |||
710 | static int stop_cpsch(struct device_queue_manager *dqm) | 711 | static int stop_cpsch(struct device_queue_manager *dqm) |
711 | { | 712 | { |
712 | mutex_lock(&dqm->lock); | 713 | mutex_lock(&dqm->lock); |
713 | unmap_queues_cpsch(dqm, true); | 714 | unmap_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES, 0); |
714 | mutex_unlock(&dqm->lock); | 715 | mutex_unlock(&dqm->lock); |
715 | 716 | ||
716 | kfd_gtt_sa_free(dqm->dev, dqm->fence_mem); | 717 | kfd_gtt_sa_free(dqm->dev, dqm->fence_mem); |
@@ -754,7 +755,7 @@ static void destroy_kernel_queue_cpsch(struct device_queue_manager *dqm, | |||
754 | { | 755 | { |
755 | mutex_lock(&dqm->lock); | 756 | mutex_lock(&dqm->lock); |
756 | /* here we actually preempt the DIQ */ | 757 | /* here we actually preempt the DIQ */ |
757 | unmap_queues_cpsch(dqm, true); | 758 | unmap_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES, 0); |
758 | list_del(&kq->list); | 759 | list_del(&kq->list); |
759 | dqm->queue_count--; | 760 | dqm->queue_count--; |
760 | qpd->is_debug = false; | 761 | qpd->is_debug = false; |
@@ -863,10 +864,10 @@ static int unmap_sdma_queues(struct device_queue_manager *dqm, | |||
863 | 864 | ||
864 | /* dqm->lock mutex has to be locked before calling this function */ | 865 | /* dqm->lock mutex has to be locked before calling this function */ |
865 | static int unmap_queues_cpsch(struct device_queue_manager *dqm, | 866 | static int unmap_queues_cpsch(struct device_queue_manager *dqm, |
866 | bool static_queues_included) | 867 | enum kfd_unmap_queues_filter filter, |
868 | uint32_t filter_param) | ||
867 | { | 869 | { |
868 | int retval; | 870 | int retval; |
869 | enum kfd_unmap_queues_filter filter; | ||
870 | struct kfd_process_device *pdd; | 871 | struct kfd_process_device *pdd; |
871 | 872 | ||
872 | retval = 0; | 873 | retval = 0; |
@@ -882,12 +883,8 @@ static int unmap_queues_cpsch(struct device_queue_manager *dqm, | |||
882 | unmap_sdma_queues(dqm, 1); | 883 | unmap_sdma_queues(dqm, 1); |
883 | } | 884 | } |
884 | 885 | ||
885 | filter = static_queues_included ? | ||
886 | KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES : | ||
887 | KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES; | ||
888 | |||
889 | retval = pm_send_unmap_queue(&dqm->packets, KFD_QUEUE_TYPE_COMPUTE, | 886 | retval = pm_send_unmap_queue(&dqm->packets, KFD_QUEUE_TYPE_COMPUTE, |
890 | filter, 0, false, 0); | 887 | filter, filter_param, false, 0); |
891 | if (retval) | 888 | if (retval) |
892 | return retval; | 889 | return retval; |
893 | 890 | ||
@@ -914,7 +911,8 @@ static int execute_queues_cpsch(struct device_queue_manager *dqm) | |||
914 | { | 911 | { |
915 | int retval; | 912 | int retval; |
916 | 913 | ||
917 | retval = unmap_queues_cpsch(dqm, false); | 914 | retval = unmap_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, |
915 | 0); | ||
918 | if (retval) { | 916 | if (retval) { |
919 | pr_err("The cp might be in an unrecoverable state due to an unsuccessful queues preemption"); | 917 | pr_err("The cp might be in an unrecoverable state due to an unsuccessful queues preemption"); |
920 | return retval; | 918 | return retval; |