diff options
| author | Dave Airlie <airlied@redhat.com> | 2015-01-26 18:48:33 -0500 |
|---|---|---|
| committer | Dave Airlie <airlied@redhat.com> | 2015-01-26 18:48:33 -0500 |
| commit | db9098ba1b51995c0518a621f58efd41bfeb4bfc (patch) | |
| tree | dd37528ea35d1fdda061bb9f00724ac8d5fab2fa | |
| parent | 22cbbceff65cb03ee37495b52f360809fa439293 (diff) | |
| parent | 9fa843e76d9092e5348aac7252cbb71f09902507 (diff) | |
Merge tag 'drm-amdkfd-fixes-2015-01-26' of git://people.freedesktop.org/~gabbayo/linux into drm-fixes
A couple of fixes for -rc7 in amdkfd:
- Forgot to free resources when creation of queue has failed
- Initialization of pipelines was incorrect (3 patches)
In addition, the patch "drm/amdkfd: Allow user to limit only queues per device"
is not a fix, but I would like to push it for 3.19 as it changes the ABI
between amdkfd and userspace (by changing the module parameters). I would
prefer *not* to support the two deprecated module parameters if I don't have
to, as amdkfd hasn't been released yet.
* tag 'drm-amdkfd-fixes-2015-01-26' of git://people.freedesktop.org/~gabbayo/linux:
drm/amdkfd: Fix bug in call to init_pipelines()
drm/amdkfd: Fix bug in pipelines initialization
drm/radeon: Don't increment pipe_id in kgd_init_pipeline
drm/amdkfd: Allow user to limit only queues per device
drm/amdkfd: PQM handle queue creation fault
| -rw-r--r-- | drivers/gpu/drm/amd/amdkfd/kfd_device.c | 6 | ||||
| -rw-r--r-- | drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c | 78 | ||||
| -rw-r--r-- | drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h | 1 | ||||
| -rw-r--r-- | drivers/gpu/drm/amd/amdkfd/kfd_module.c | 27 | ||||
| -rw-r--r-- | drivers/gpu/drm/amd/amdkfd/kfd_pasid.c | 2 | ||||
| -rw-r--r-- | drivers/gpu/drm/amd/amdkfd/kfd_priv.h | 17 | ||||
| -rw-r--r-- | drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c | 12 | ||||
| -rw-r--r-- | drivers/gpu/drm/radeon/radeon_kfd.c | 2 |
8 files changed, 106 insertions, 39 deletions
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c index 633532a2e7ec..25bc47f3c1cf 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c | |||
| @@ -26,6 +26,7 @@ | |||
| 26 | #include <linux/slab.h> | 26 | #include <linux/slab.h> |
| 27 | #include "kfd_priv.h" | 27 | #include "kfd_priv.h" |
| 28 | #include "kfd_device_queue_manager.h" | 28 | #include "kfd_device_queue_manager.h" |
| 29 | #include "kfd_pm4_headers.h" | ||
| 29 | 30 | ||
| 30 | #define MQD_SIZE_ALIGNED 768 | 31 | #define MQD_SIZE_ALIGNED 768 |
| 31 | 32 | ||
| @@ -169,9 +170,8 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd, | |||
| 169 | kfd->shared_resources = *gpu_resources; | 170 | kfd->shared_resources = *gpu_resources; |
| 170 | 171 | ||
| 171 | /* calculate max size of mqds needed for queues */ | 172 | /* calculate max size of mqds needed for queues */ |
| 172 | size = max_num_of_processes * | 173 | size = max_num_of_queues_per_device * |
| 173 | max_num_of_queues_per_process * | 174 | kfd->device_info->mqd_size_aligned; |
| 174 | kfd->device_info->mqd_size_aligned; | ||
| 175 | 175 | ||
| 176 | /* add another 512KB for all other allocations on gart */ | 176 | /* add another 512KB for all other allocations on gart */ |
| 177 | size += 512 * 1024; | 177 | size += 512 * 1024; |
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c index 30c8fda9622e..0d8694f015c1 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c | |||
| @@ -183,6 +183,13 @@ static int create_queue_nocpsch(struct device_queue_manager *dqm, | |||
| 183 | 183 | ||
| 184 | mutex_lock(&dqm->lock); | 184 | mutex_lock(&dqm->lock); |
| 185 | 185 | ||
| 186 | if (dqm->total_queue_count >= max_num_of_queues_per_device) { | ||
| 187 | pr_warn("amdkfd: Can't create new usermode queue because %d queues were already created\n", | ||
| 188 | dqm->total_queue_count); | ||
| 189 | mutex_unlock(&dqm->lock); | ||
| 190 | return -EPERM; | ||
| 191 | } | ||
| 192 | |||
| 186 | if (list_empty(&qpd->queues_list)) { | 193 | if (list_empty(&qpd->queues_list)) { |
| 187 | retval = allocate_vmid(dqm, qpd, q); | 194 | retval = allocate_vmid(dqm, qpd, q); |
| 188 | if (retval != 0) { | 195 | if (retval != 0) { |
| @@ -207,6 +214,14 @@ static int create_queue_nocpsch(struct device_queue_manager *dqm, | |||
| 207 | list_add(&q->list, &qpd->queues_list); | 214 | list_add(&q->list, &qpd->queues_list); |
| 208 | dqm->queue_count++; | 215 | dqm->queue_count++; |
| 209 | 216 | ||
| 217 | /* | ||
| 218 | * Unconditionally increment this counter, regardless of the queue's | ||
| 219 | * type or whether the queue is active. | ||
| 220 | */ | ||
| 221 | dqm->total_queue_count++; | ||
| 222 | pr_debug("Total of %d queues are accountable so far\n", | ||
| 223 | dqm->total_queue_count); | ||
| 224 | |||
| 210 | mutex_unlock(&dqm->lock); | 225 | mutex_unlock(&dqm->lock); |
| 211 | return 0; | 226 | return 0; |
| 212 | } | 227 | } |
| @@ -326,6 +341,15 @@ static int destroy_queue_nocpsch(struct device_queue_manager *dqm, | |||
| 326 | if (list_empty(&qpd->queues_list)) | 341 | if (list_empty(&qpd->queues_list)) |
| 327 | deallocate_vmid(dqm, qpd, q); | 342 | deallocate_vmid(dqm, qpd, q); |
| 328 | dqm->queue_count--; | 343 | dqm->queue_count--; |
| 344 | |||
| 345 | /* | ||
| 346 | * Unconditionally decrement this counter, regardless of the queue's | ||
| 347 | * type | ||
| 348 | */ | ||
| 349 | dqm->total_queue_count--; | ||
| 350 | pr_debug("Total of %d queues are accountable so far\n", | ||
| 351 | dqm->total_queue_count); | ||
| 352 | |||
| 329 | out: | 353 | out: |
| 330 | mutex_unlock(&dqm->lock); | 354 | mutex_unlock(&dqm->lock); |
| 331 | return retval; | 355 | return retval; |
| @@ -541,10 +565,14 @@ static int init_pipelines(struct device_queue_manager *dqm, | |||
| 541 | 565 | ||
| 542 | for (i = 0; i < pipes_num; i++) { | 566 | for (i = 0; i < pipes_num; i++) { |
| 543 | inx = i + first_pipe; | 567 | inx = i + first_pipe; |
| 568 | /* | ||
| 569 | * HPD buffer on GTT is allocated by amdkfd, no need to waste | ||
| 570 | * space in GTT for pipelines we don't initialize | ||
| 571 | */ | ||
| 544 | pipe_hpd_addr = dqm->pipelines_addr + i * CIK_HPD_EOP_BYTES; | 572 | pipe_hpd_addr = dqm->pipelines_addr + i * CIK_HPD_EOP_BYTES; |
| 545 | pr_debug("kfd: pipeline address %llX\n", pipe_hpd_addr); | 573 | pr_debug("kfd: pipeline address %llX\n", pipe_hpd_addr); |
| 546 | /* = log2(bytes/4)-1 */ | 574 | /* = log2(bytes/4)-1 */ |
| 547 | kfd2kgd->init_pipeline(dqm->dev->kgd, i, | 575 | kfd2kgd->init_pipeline(dqm->dev->kgd, inx, |
| 548 | CIK_HPD_EOP_BYTES_LOG2 - 3, pipe_hpd_addr); | 576 | CIK_HPD_EOP_BYTES_LOG2 - 3, pipe_hpd_addr); |
| 549 | } | 577 | } |
| 550 | 578 | ||
| @@ -560,7 +588,7 @@ static int init_scheduler(struct device_queue_manager *dqm) | |||
| 560 | 588 | ||
| 561 | pr_debug("kfd: In %s\n", __func__); | 589 | pr_debug("kfd: In %s\n", __func__); |
| 562 | 590 | ||
| 563 | retval = init_pipelines(dqm, get_pipes_num(dqm), KFD_DQM_FIRST_PIPE); | 591 | retval = init_pipelines(dqm, get_pipes_num(dqm), get_first_pipe(dqm)); |
| 564 | if (retval != 0) | 592 | if (retval != 0) |
| 565 | return retval; | 593 | return retval; |
| 566 | 594 | ||
| @@ -752,6 +780,21 @@ static int create_kernel_queue_cpsch(struct device_queue_manager *dqm, | |||
| 752 | pr_debug("kfd: In func %s\n", __func__); | 780 | pr_debug("kfd: In func %s\n", __func__); |
| 753 | 781 | ||
| 754 | mutex_lock(&dqm->lock); | 782 | mutex_lock(&dqm->lock); |
| 783 | if (dqm->total_queue_count >= max_num_of_queues_per_device) { | ||
| 784 | pr_warn("amdkfd: Can't create new kernel queue because %d queues were already created\n", | ||
| 785 | dqm->total_queue_count); | ||
| 786 | mutex_unlock(&dqm->lock); | ||
| 787 | return -EPERM; | ||
| 788 | } | ||
| 789 | |||
| 790 | /* | ||
| 791 | * Unconditionally increment this counter, regardless of the queue's | ||
| 792 | * type or whether the queue is active. | ||
| 793 | */ | ||
| 794 | dqm->total_queue_count++; | ||
| 795 | pr_debug("Total of %d queues are accountable so far\n", | ||
| 796 | dqm->total_queue_count); | ||
| 797 | |||
| 755 | list_add(&kq->list, &qpd->priv_queue_list); | 798 | list_add(&kq->list, &qpd->priv_queue_list); |
| 756 | dqm->queue_count++; | 799 | dqm->queue_count++; |
| 757 | qpd->is_debug = true; | 800 | qpd->is_debug = true; |
| @@ -775,6 +818,13 @@ static void destroy_kernel_queue_cpsch(struct device_queue_manager *dqm, | |||
| 775 | dqm->queue_count--; | 818 | dqm->queue_count--; |
| 776 | qpd->is_debug = false; | 819 | qpd->is_debug = false; |
| 777 | execute_queues_cpsch(dqm, false); | 820 | execute_queues_cpsch(dqm, false); |
| 821 | /* | ||
| 822 | * Unconditionally decrement this counter, regardless of the queue's | ||
| 823 | * type. | ||
| 824 | */ | ||
| 825 | dqm->total_queue_count++; | ||
| 826 | pr_debug("Total of %d queues are accountable so far\n", | ||
| 827 | dqm->total_queue_count); | ||
| 778 | mutex_unlock(&dqm->lock); | 828 | mutex_unlock(&dqm->lock); |
| 779 | } | 829 | } |
| 780 | 830 | ||
| @@ -793,6 +843,13 @@ static int create_queue_cpsch(struct device_queue_manager *dqm, struct queue *q, | |||
| 793 | 843 | ||
| 794 | mutex_lock(&dqm->lock); | 844 | mutex_lock(&dqm->lock); |
| 795 | 845 | ||
| 846 | if (dqm->total_queue_count >= max_num_of_queues_per_device) { | ||
| 847 | pr_warn("amdkfd: Can't create new usermode queue because %d queues were already created\n", | ||
| 848 | dqm->total_queue_count); | ||
| 849 | retval = -EPERM; | ||
| 850 | goto out; | ||
| 851 | } | ||
| 852 | |||
| 796 | mqd = dqm->get_mqd_manager(dqm, KFD_MQD_TYPE_CIK_CP); | 853 | mqd = dqm->get_mqd_manager(dqm, KFD_MQD_TYPE_CIK_CP); |
| 797 | if (mqd == NULL) { | 854 | if (mqd == NULL) { |
| 798 | mutex_unlock(&dqm->lock); | 855 | mutex_unlock(&dqm->lock); |
| @@ -810,6 +867,15 @@ static int create_queue_cpsch(struct device_queue_manager *dqm, struct queue *q, | |||
| 810 | retval = execute_queues_cpsch(dqm, false); | 867 | retval = execute_queues_cpsch(dqm, false); |
| 811 | } | 868 | } |
| 812 | 869 | ||
| 870 | /* | ||
| 871 | * Unconditionally increment this counter, regardless of the queue's | ||
| 872 | * type or whether the queue is active. | ||
| 873 | */ | ||
| 874 | dqm->total_queue_count++; | ||
| 875 | |||
| 876 | pr_debug("Total of %d queues are accountable so far\n", | ||
| 877 | dqm->total_queue_count); | ||
| 878 | |||
| 813 | out: | 879 | out: |
| 814 | mutex_unlock(&dqm->lock); | 880 | mutex_unlock(&dqm->lock); |
| 815 | return retval; | 881 | return retval; |
| @@ -930,6 +996,14 @@ static int destroy_queue_cpsch(struct device_queue_manager *dqm, | |||
| 930 | 996 | ||
| 931 | mqd->uninit_mqd(mqd, q->mqd, q->mqd_mem_obj); | 997 | mqd->uninit_mqd(mqd, q->mqd, q->mqd_mem_obj); |
| 932 | 998 | ||
| 999 | /* | ||
| 1000 | * Unconditionally decrement this counter, regardless of the queue's | ||
| 1001 | * type | ||
| 1002 | */ | ||
| 1003 | dqm->total_queue_count--; | ||
| 1004 | pr_debug("Total of %d queues are accountable so far\n", | ||
| 1005 | dqm->total_queue_count); | ||
| 1006 | |||
| 933 | mutex_unlock(&dqm->lock); | 1007 | mutex_unlock(&dqm->lock); |
| 934 | 1008 | ||
| 935 | return 0; | 1009 | return 0; |
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h index c3f189e8ae35..52035bf0c1cb 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h | |||
| @@ -130,6 +130,7 @@ struct device_queue_manager { | |||
| 130 | struct list_head queues; | 130 | struct list_head queues; |
| 131 | unsigned int processes_count; | 131 | unsigned int processes_count; |
| 132 | unsigned int queue_count; | 132 | unsigned int queue_count; |
| 133 | unsigned int total_queue_count; | ||
| 133 | unsigned int next_pipe_to_allocate; | 134 | unsigned int next_pipe_to_allocate; |
| 134 | unsigned int *allocated_queues; | 135 | unsigned int *allocated_queues; |
| 135 | unsigned int vmid_bitmap; | 136 | unsigned int vmid_bitmap; |
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_module.c b/drivers/gpu/drm/amd/amdkfd/kfd_module.c index 95d5af138e6e..a8be6df85347 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_module.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_module.c | |||
| @@ -50,15 +50,10 @@ module_param(sched_policy, int, 0444); | |||
| 50 | MODULE_PARM_DESC(sched_policy, | 50 | MODULE_PARM_DESC(sched_policy, |
| 51 | "Kernel cmdline parameter that defines the amdkfd scheduling policy"); | 51 | "Kernel cmdline parameter that defines the amdkfd scheduling policy"); |
| 52 | 52 | ||
| 53 | int max_num_of_processes = KFD_MAX_NUM_OF_PROCESSES_DEFAULT; | 53 | int max_num_of_queues_per_device = KFD_MAX_NUM_OF_QUEUES_PER_DEVICE_DEFAULT; |
| 54 | module_param(max_num_of_processes, int, 0444); | 54 | module_param(max_num_of_queues_per_device, int, 0444); |
| 55 | MODULE_PARM_DESC(max_num_of_processes, | 55 | MODULE_PARM_DESC(max_num_of_queues_per_device, |
| 56 | "Kernel cmdline parameter that defines the amdkfd maximum number of supported processes"); | 56 | "Maximum number of supported queues per device (1 = Minimum, 4096 = default)"); |
| 57 | |||
| 58 | int max_num_of_queues_per_process = KFD_MAX_NUM_OF_QUEUES_PER_PROCESS_DEFAULT; | ||
| 59 | module_param(max_num_of_queues_per_process, int, 0444); | ||
| 60 | MODULE_PARM_DESC(max_num_of_queues_per_process, | ||
| 61 | "Kernel cmdline parameter that defines the amdkfd maximum number of supported queues per process"); | ||
| 62 | 57 | ||
| 63 | bool kgd2kfd_init(unsigned interface_version, | 58 | bool kgd2kfd_init(unsigned interface_version, |
| 64 | const struct kfd2kgd_calls *f2g, | 59 | const struct kfd2kgd_calls *f2g, |
| @@ -100,16 +95,10 @@ static int __init kfd_module_init(void) | |||
| 100 | } | 95 | } |
| 101 | 96 | ||
| 102 | /* Verify module parameters */ | 97 | /* Verify module parameters */ |
| 103 | if ((max_num_of_processes < 0) || | 98 | if ((max_num_of_queues_per_device < 0) || |
| 104 | (max_num_of_processes > KFD_MAX_NUM_OF_PROCESSES)) { | 99 | (max_num_of_queues_per_device > |
| 105 | pr_err("kfd: max_num_of_processes must be between 0 to KFD_MAX_NUM_OF_PROCESSES\n"); | 100 | KFD_MAX_NUM_OF_QUEUES_PER_DEVICE)) { |
| 106 | return -1; | 101 | pr_err("kfd: max_num_of_queues_per_device must be between 0 to KFD_MAX_NUM_OF_QUEUES_PER_DEVICE\n"); |
| 107 | } | ||
| 108 | |||
| 109 | if ((max_num_of_queues_per_process < 0) || | ||
| 110 | (max_num_of_queues_per_process > | ||
| 111 | KFD_MAX_NUM_OF_QUEUES_PER_PROCESS)) { | ||
| 112 | pr_err("kfd: max_num_of_queues_per_process must be between 0 to KFD_MAX_NUM_OF_QUEUES_PER_PROCESS\n"); | ||
| 113 | return -1; | 102 | return -1; |
| 114 | } | 103 | } |
| 115 | 104 | ||
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_pasid.c b/drivers/gpu/drm/amd/amdkfd/kfd_pasid.c index 4c25ef504f79..6cfe7f1f18cf 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_pasid.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_pasid.c | |||
| @@ -30,7 +30,7 @@ static DEFINE_MUTEX(pasid_mutex); | |||
| 30 | 30 | ||
| 31 | int kfd_pasid_init(void) | 31 | int kfd_pasid_init(void) |
| 32 | { | 32 | { |
| 33 | pasid_limit = max_num_of_processes; | 33 | pasid_limit = KFD_MAX_NUM_OF_PROCESSES; |
| 34 | 34 | ||
| 35 | pasid_bitmap = kcalloc(BITS_TO_LONGS(pasid_limit), sizeof(long), GFP_KERNEL); | 35 | pasid_bitmap = kcalloc(BITS_TO_LONGS(pasid_limit), sizeof(long), GFP_KERNEL); |
| 36 | if (!pasid_bitmap) | 36 | if (!pasid_bitmap) |
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h index b3dc13c83169..96dc10e8904a 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h +++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h | |||
| @@ -52,20 +52,19 @@ | |||
| 52 | #define kfd_alloc_struct(ptr_to_struct) \ | 52 | #define kfd_alloc_struct(ptr_to_struct) \ |
| 53 | ((typeof(ptr_to_struct)) kzalloc(sizeof(*ptr_to_struct), GFP_KERNEL)) | 53 | ((typeof(ptr_to_struct)) kzalloc(sizeof(*ptr_to_struct), GFP_KERNEL)) |
| 54 | 54 | ||
| 55 | /* Kernel module parameter to specify maximum number of supported processes */ | ||
| 56 | extern int max_num_of_processes; | ||
| 57 | |||
| 58 | #define KFD_MAX_NUM_OF_PROCESSES_DEFAULT 32 | ||
| 59 | #define KFD_MAX_NUM_OF_PROCESSES 512 | 55 | #define KFD_MAX_NUM_OF_PROCESSES 512 |
| 56 | #define KFD_MAX_NUM_OF_QUEUES_PER_PROCESS 1024 | ||
| 60 | 57 | ||
| 61 | /* | 58 | /* |
| 62 | * Kernel module parameter to specify maximum number of supported queues | 59 | * Kernel module parameter to specify maximum number of supported queues per |
| 63 | * per process | 60 | * device |
| 64 | */ | 61 | */ |
| 65 | extern int max_num_of_queues_per_process; | 62 | extern int max_num_of_queues_per_device; |
| 66 | 63 | ||
| 67 | #define KFD_MAX_NUM_OF_QUEUES_PER_PROCESS_DEFAULT 128 | 64 | #define KFD_MAX_NUM_OF_QUEUES_PER_DEVICE_DEFAULT 4096 |
| 68 | #define KFD_MAX_NUM_OF_QUEUES_PER_PROCESS 1024 | 65 | #define KFD_MAX_NUM_OF_QUEUES_PER_DEVICE \ |
| 66 | (KFD_MAX_NUM_OF_PROCESSES * \ | ||
| 67 | KFD_MAX_NUM_OF_QUEUES_PER_PROCESS) | ||
| 69 | 68 | ||
| 70 | #define KFD_KERNEL_QUEUE_SIZE 2048 | 69 | #define KFD_KERNEL_QUEUE_SIZE 2048 |
| 71 | 70 | ||
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c index 47526780d736..f37cf5efe642 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c | |||
| @@ -54,11 +54,11 @@ static int find_available_queue_slot(struct process_queue_manager *pqm, | |||
| 54 | pr_debug("kfd: in %s\n", __func__); | 54 | pr_debug("kfd: in %s\n", __func__); |
| 55 | 55 | ||
| 56 | found = find_first_zero_bit(pqm->queue_slot_bitmap, | 56 | found = find_first_zero_bit(pqm->queue_slot_bitmap, |
| 57 | max_num_of_queues_per_process); | 57 | KFD_MAX_NUM_OF_QUEUES_PER_PROCESS); |
| 58 | 58 | ||
| 59 | pr_debug("kfd: the new slot id %lu\n", found); | 59 | pr_debug("kfd: the new slot id %lu\n", found); |
| 60 | 60 | ||
| 61 | if (found >= max_num_of_queues_per_process) { | 61 | if (found >= KFD_MAX_NUM_OF_QUEUES_PER_PROCESS) { |
| 62 | pr_info("amdkfd: Can not open more queues for process with pasid %d\n", | 62 | pr_info("amdkfd: Can not open more queues for process with pasid %d\n", |
| 63 | pqm->process->pasid); | 63 | pqm->process->pasid); |
| 64 | return -ENOMEM; | 64 | return -ENOMEM; |
| @@ -76,7 +76,7 @@ int pqm_init(struct process_queue_manager *pqm, struct kfd_process *p) | |||
| 76 | 76 | ||
| 77 | INIT_LIST_HEAD(&pqm->queues); | 77 | INIT_LIST_HEAD(&pqm->queues); |
| 78 | pqm->queue_slot_bitmap = | 78 | pqm->queue_slot_bitmap = |
| 79 | kzalloc(DIV_ROUND_UP(max_num_of_queues_per_process, | 79 | kzalloc(DIV_ROUND_UP(KFD_MAX_NUM_OF_QUEUES_PER_PROCESS, |
| 80 | BITS_PER_BYTE), GFP_KERNEL); | 80 | BITS_PER_BYTE), GFP_KERNEL); |
| 81 | if (pqm->queue_slot_bitmap == NULL) | 81 | if (pqm->queue_slot_bitmap == NULL) |
| 82 | return -ENOMEM; | 82 | return -ENOMEM; |
| @@ -203,6 +203,7 @@ int pqm_create_queue(struct process_queue_manager *pqm, | |||
| 203 | pqn->kq = NULL; | 203 | pqn->kq = NULL; |
| 204 | retval = dev->dqm->create_queue(dev->dqm, q, &pdd->qpd, | 204 | retval = dev->dqm->create_queue(dev->dqm, q, &pdd->qpd, |
| 205 | &q->properties.vmid); | 205 | &q->properties.vmid); |
| 206 | pr_debug("DQM returned %d for create_queue\n", retval); | ||
| 206 | print_queue(q); | 207 | print_queue(q); |
| 207 | break; | 208 | break; |
| 208 | case KFD_QUEUE_TYPE_DIQ: | 209 | case KFD_QUEUE_TYPE_DIQ: |
| @@ -222,7 +223,7 @@ int pqm_create_queue(struct process_queue_manager *pqm, | |||
| 222 | } | 223 | } |
| 223 | 224 | ||
| 224 | if (retval != 0) { | 225 | if (retval != 0) { |
| 225 | pr_err("kfd: error dqm create queue\n"); | 226 | pr_debug("Error dqm create queue\n"); |
| 226 | goto err_create_queue; | 227 | goto err_create_queue; |
| 227 | } | 228 | } |
| 228 | 229 | ||
| @@ -241,7 +242,10 @@ int pqm_create_queue(struct process_queue_manager *pqm, | |||
| 241 | err_create_queue: | 242 | err_create_queue: |
| 242 | kfree(pqn); | 243 | kfree(pqn); |
| 243 | err_allocate_pqn: | 244 | err_allocate_pqn: |
| 245 | /* check if queues list is empty unregister process from device */ | ||
| 244 | clear_bit(*qid, pqm->queue_slot_bitmap); | 246 | clear_bit(*qid, pqm->queue_slot_bitmap); |
| 247 | if (list_empty(&pqm->queues)) | ||
| 248 | dev->dqm->unregister_process(dev->dqm, &pdd->qpd); | ||
| 245 | return retval; | 249 | return retval; |
| 246 | } | 250 | } |
| 247 | 251 | ||
diff --git a/drivers/gpu/drm/radeon/radeon_kfd.c b/drivers/gpu/drm/radeon/radeon_kfd.c index 8bf87f1203cc..bef9a0953284 100644 --- a/drivers/gpu/drm/radeon/radeon_kfd.c +++ b/drivers/gpu/drm/radeon/radeon_kfd.c | |||
| @@ -436,7 +436,7 @@ static int kgd_init_memory(struct kgd_dev *kgd) | |||
| 436 | static int kgd_init_pipeline(struct kgd_dev *kgd, uint32_t pipe_id, | 436 | static int kgd_init_pipeline(struct kgd_dev *kgd, uint32_t pipe_id, |
| 437 | uint32_t hpd_size, uint64_t hpd_gpu_addr) | 437 | uint32_t hpd_size, uint64_t hpd_gpu_addr) |
| 438 | { | 438 | { |
| 439 | uint32_t mec = (++pipe_id / CIK_PIPE_PER_MEC) + 1; | 439 | uint32_t mec = (pipe_id / CIK_PIPE_PER_MEC) + 1; |
| 440 | uint32_t pipe = (pipe_id % CIK_PIPE_PER_MEC); | 440 | uint32_t pipe = (pipe_id % CIK_PIPE_PER_MEC); |
| 441 | 441 | ||
| 442 | lock_srbm(kgd, mec, pipe, 0, 0); | 442 | lock_srbm(kgd, mec, pipe, 0, 0); |
