author     Oded Gabbay <oded.gabbay@amd.com>    2014-10-26 16:00:31 -0400
committer  Oded Gabbay <oded.gabbay@amd.com>    2015-01-09 15:26:10 -0500
commit     a86aa3ca5a2f16772653782c078f62a7d76dd57e
tree       42a8dc00bc514cd363351542bcce9cf131b3ae0a
parent     73a1da0bb3b32a552817c57dcaebef09bd2f3677
drm/amdkfd: Using new gtt sa in amdkfd
This patch changes the calls throughout the amdkfd driver from the old
kfd-->kgd interface to the new kfd gtt sa inside amdkfd.

v2: change the new call in the sdma code that appeared because of the sdma
feature.

Signed-off-by: Oded Gabbay <oded.gabbay@amd.com>
Reviewed-by: Alexey Skidanov <Alexey.skidanov@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
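The shape of the conversion is easy to read off the call sites alone: callers stop passing an alignment, a memory-pool flag and the kgd handle, and they drop the (struct kgd_mem **) casts. Below is a minimal before/after sketch, assuming the kfd_gtt_sa_* prototypes inferred from the calls in this patch (the real declarations come from the parent commit that introduced the gtt sub-allocator); dev, size and mem_obj are placeholder names standing in for the per-caller variables:

    /* Sketch only -- prototypes inferred from the call sites in this patch,
     * not copied from the amdkfd headers; names are illustrative placeholders.
     */
    int  kfd_gtt_sa_allocate(struct kfd_dev *kfd, unsigned int size,
                             struct kfd_mem_obj **mem_obj);
    void kfd_gtt_sa_free(struct kfd_dev *kfd, struct kfd_mem_obj *mem_obj);

    /* Old interface: every caller supplied alignment, pool and kgd handle. */
    retval = kfd2kgd->allocate_mem(dev->kgd, size, PAGE_SIZE,
                    KFD_MEMPOOL_SYSTEM_WRITECOMBINE,
                    (struct kgd_mem **) &mem_obj);

    /* New interface: the GTT sub-allocator owns those details. */
    retval = kfd_gtt_sa_allocate(dev, size, &mem_obj);
    if (retval != 0)
            return retval;

    /* mem_obj->cpu_ptr and mem_obj->gpu_addr are consumed exactly as before,
     * and the buffer is released with kfd_gtt_sa_free(dev, mem_obj).
     */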
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c  23
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c  41
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c  24
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c  10
4 files changed, 33 insertions, 65 deletions
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
index 7ead0802883d..6806e64c5ffd 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
@@ -519,11 +519,8 @@ static int init_pipelines(struct device_queue_manager *dqm,
          * because it contains no data when there are no active queues.
          */

-        err = kfd2kgd->allocate_mem(dqm->dev->kgd,
-                        CIK_HPD_EOP_BYTES * pipes_num,
-                        PAGE_SIZE,
-                        KFD_MEMPOOL_SYSTEM_WRITECOMBINE,
-                        (struct kgd_mem **) &dqm->pipeline_mem);
+        err = kfd_gtt_sa_allocate(dqm->dev, CIK_HPD_EOP_BYTES * pipes_num,
+                        &dqm->pipeline_mem);

         if (err) {
                 pr_err("kfd: error allocate vidmem num pipes: %d\n",
@@ -538,8 +535,7 @@ static int init_pipelines(struct device_queue_manager *dqm,

         mqd = dqm->get_mqd_manager(dqm, KFD_MQD_TYPE_CIK_COMPUTE);
         if (mqd == NULL) {
-                kfd2kgd->free_mem(dqm->dev->kgd,
-                                (struct kgd_mem *) dqm->pipeline_mem);
+                kfd_gtt_sa_free(dqm->dev, dqm->pipeline_mem);
                 return -ENOMEM;
         }

@@ -614,8 +610,7 @@ static void uninitialize_nocpsch(struct device_queue_manager *dqm)
         for (i = 0 ; i < KFD_MQD_TYPE_MAX ; i++)
                 kfree(dqm->mqds[i]);
         mutex_destroy(&dqm->lock);
-        kfd2kgd->free_mem(dqm->dev->kgd,
-                        (struct kgd_mem *) dqm->pipeline_mem);
+        kfd_gtt_sa_free(dqm->dev, dqm->pipeline_mem);
 }

 static int start_nocpsch(struct device_queue_manager *dqm)
@@ -773,11 +768,8 @@ static int start_cpsch(struct device_queue_manager *dqm)
         pr_debug("kfd: allocating fence memory\n");

         /* allocate fence memory on the gart */
-        retval = kfd2kgd->allocate_mem(dqm->dev->kgd,
-                        sizeof(*dqm->fence_addr),
-                        32,
-                        KFD_MEMPOOL_SYSTEM_WRITECOMBINE,
-                        (struct kgd_mem **) &dqm->fence_mem);
+        retval = kfd_gtt_sa_allocate(dqm->dev, sizeof(*dqm->fence_addr),
+                        &dqm->fence_mem);

         if (retval != 0)
                 goto fail_allocate_vidmem;
@@ -812,8 +804,7 @@ static int stop_cpsch(struct device_queue_manager *dqm)
                 pdd = qpd_to_pdd(node->qpd);
                 pdd->bound = false;
         }
-        kfd2kgd->free_mem(dqm->dev->kgd,
-                        (struct kgd_mem *) dqm->fence_mem);
+        kfd_gtt_sa_free(dqm->dev, dqm->fence_mem);
         pm_uninit(&dqm->packets);

         return 0;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c
index 935071410724..0fd8bb7c863e 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c
@@ -72,11 +72,7 @@ static bool initialize(struct kernel_queue *kq, struct kfd_dev *dev,
         if (prop.doorbell_ptr == NULL)
                 goto err_get_kernel_doorbell;

-        retval = kfd2kgd->allocate_mem(dev->kgd,
-                        queue_size,
-                        PAGE_SIZE,
-                        KFD_MEMPOOL_SYSTEM_WRITECOMBINE,
-                        (struct kgd_mem **) &kq->pq);
+        retval = kfd_gtt_sa_allocate(dev, queue_size, &kq->pq);

         if (retval != 0)
                 goto err_pq_allocate_vidmem;
@@ -84,11 +80,8 @@ static bool initialize(struct kernel_queue *kq, struct kfd_dev *dev,
         kq->pq_kernel_addr = kq->pq->cpu_ptr;
         kq->pq_gpu_addr = kq->pq->gpu_addr;

-        retval = kfd2kgd->allocate_mem(dev->kgd,
-                        sizeof(*kq->rptr_kernel),
-                        32,
-                        KFD_MEMPOOL_SYSTEM_WRITECOMBINE,
-                        (struct kgd_mem **) &kq->rptr_mem);
+        retval = kfd_gtt_sa_allocate(dev, sizeof(*kq->rptr_kernel),
+                        &kq->rptr_mem);

         if (retval != 0)
                 goto err_rptr_allocate_vidmem;
@@ -96,11 +89,8 @@ static bool initialize(struct kernel_queue *kq, struct kfd_dev *dev,
         kq->rptr_kernel = kq->rptr_mem->cpu_ptr;
         kq->rptr_gpu_addr = kq->rptr_mem->gpu_addr;

-        retval = kfd2kgd->allocate_mem(dev->kgd,
-                        sizeof(*kq->wptr_kernel),
-                        32,
-                        KFD_MEMPOOL_SYSTEM_WRITECOMBINE,
-                        (struct kgd_mem **) &kq->wptr_mem);
+        retval = kfd_gtt_sa_allocate(dev, sizeof(*kq->wptr_kernel),
+                        &kq->wptr_mem);

         if (retval != 0)
                 goto err_wptr_allocate_vidmem;
@@ -145,11 +135,8 @@ static bool initialize(struct kernel_queue *kq, struct kfd_dev *dev,
         } else {
                 /* allocate fence for DIQ */

-                retval = kfd2kgd->allocate_mem(dev->kgd,
-                                sizeof(uint32_t),
-                                32,
-                                KFD_MEMPOOL_SYSTEM_WRITECOMBINE,
-                                (struct kgd_mem **) &kq->fence_mem_obj);
+                retval = kfd_gtt_sa_allocate(dev, sizeof(uint32_t),
+                                &kq->fence_mem_obj);

                 if (retval != 0)
                         goto err_alloc_fence;
@@ -165,11 +152,11 @@ err_alloc_fence:
 err_init_mqd:
         uninit_queue(kq->queue);
 err_init_queue:
-        kfd2kgd->free_mem(dev->kgd, (struct kgd_mem *) kq->wptr_mem);
+        kfd_gtt_sa_free(dev, kq->wptr_mem);
 err_wptr_allocate_vidmem:
-        kfd2kgd->free_mem(dev->kgd, (struct kgd_mem *) kq->rptr_mem);
+        kfd_gtt_sa_free(dev, kq->rptr_mem);
 err_rptr_allocate_vidmem:
-        kfd2kgd->free_mem(dev->kgd, (struct kgd_mem *) kq->pq);
+        kfd_gtt_sa_free(dev, kq->pq);
 err_pq_allocate_vidmem:
         pr_err("kfd: error init pq\n");
         kfd_release_kernel_doorbell(dev, prop.doorbell_ptr);
@@ -190,10 +177,12 @@ static void uninitialize(struct kernel_queue *kq)
                                 QUEUE_PREEMPT_DEFAULT_TIMEOUT_MS,
                                 kq->queue->pipe,
                                 kq->queue->queue);
+        else if (kq->queue->properties.type == KFD_QUEUE_TYPE_DIQ)
+                kfd_gtt_sa_free(kq->dev, kq->fence_mem_obj);

-        kfd2kgd->free_mem(kq->dev->kgd, (struct kgd_mem *) kq->rptr_mem);
-        kfd2kgd->free_mem(kq->dev->kgd, (struct kgd_mem *) kq->wptr_mem);
-        kfd2kgd->free_mem(kq->dev->kgd, (struct kgd_mem *) kq->pq);
+        kfd_gtt_sa_free(kq->dev, kq->rptr_mem);
+        kfd_gtt_sa_free(kq->dev, kq->wptr_mem);
+        kfd_gtt_sa_free(kq->dev, kq->pq);
         kfd_release_kernel_doorbell(kq->dev,
                                 kq->queue->properties.doorbell_ptr);
         uninit_queue(kq->queue);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c
index 1c1fd3c765f7..678c33f0a1b8 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c
@@ -52,11 +52,8 @@ static int init_mqd(struct mqd_manager *mm, void **mqd,

         pr_debug("kfd: In func %s\n", __func__);

-        retval = kfd2kgd->allocate_mem(mm->dev->kgd,
-                        sizeof(struct cik_mqd),
-                        256,
-                        KFD_MEMPOOL_SYSTEM_WRITECOMBINE,
-                        (struct kgd_mem **) mqd_mem_obj);
+        retval = kfd_gtt_sa_allocate(mm->dev, sizeof(struct cik_mqd),
+                        mqd_mem_obj);

         if (retval != 0)
                 return -ENOMEM;
@@ -121,11 +118,9 @@ static int init_mqd_sdma(struct mqd_manager *mm, void **mqd,

         BUG_ON(!mm || !mqd || !mqd_mem_obj);

-        retval = kfd2kgd->allocate_mem(mm->dev->kgd,
-                        sizeof(struct cik_sdma_rlc_registers),
-                        256,
-                        KFD_MEMPOOL_SYSTEM_WRITECOMBINE,
-                        (struct kgd_mem **) mqd_mem_obj);
+        retval = kfd_gtt_sa_allocate(mm->dev,
+                        sizeof(struct cik_sdma_rlc_registers),
+                        mqd_mem_obj);

         if (retval != 0)
                 return -ENOMEM;
@@ -147,14 +142,14 @@ static void uninit_mqd(struct mqd_manager *mm, void *mqd,
                         struct kfd_mem_obj *mqd_mem_obj)
 {
         BUG_ON(!mm || !mqd);
-        kfd2kgd->free_mem(mm->dev->kgd, (struct kgd_mem *) mqd_mem_obj);
+        kfd_gtt_sa_free(mm->dev, mqd_mem_obj);
 }

 static void uninit_mqd_sdma(struct mqd_manager *mm, void *mqd,
                         struct kfd_mem_obj *mqd_mem_obj)
 {
         BUG_ON(!mm || !mqd);
-        kfd2kgd->free_mem(mm->dev->kgd, (struct kgd_mem *) mqd_mem_obj);
+        kfd_gtt_sa_free(mm->dev, mqd_mem_obj);
 }

 static int load_mqd(struct mqd_manager *mm, void *mqd, uint32_t pipe_id,
@@ -306,11 +301,8 @@ static int init_mqd_hiq(struct mqd_manager *mm, void **mqd,

         pr_debug("kfd: In func %s\n", __func__);

-        retval = kfd2kgd->allocate_mem(mm->dev->kgd,
-                        sizeof(struct cik_mqd),
-                        256,
-                        KFD_MEMPOOL_SYSTEM_WRITECOMBINE,
-                        (struct kgd_mem **) mqd_mem_obj);
+        retval = kfd_gtt_sa_allocate(mm->dev, sizeof(struct cik_mqd),
+                        mqd_mem_obj);

         if (retval != 0)
                 return -ENOMEM;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c
index 5ce9233d2004..3cda952ac2f8 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c
@@ -97,11 +97,8 @@ static int pm_allocate_runlist_ib(struct packet_manager *pm,

         pm_calc_rlib_size(pm, rl_buffer_size, is_over_subscription);

-        retval = kfd2kgd->allocate_mem(pm->dqm->dev->kgd,
-                        *rl_buffer_size,
-                        PAGE_SIZE,
-                        KFD_MEMPOOL_SYSTEM_WRITECOMBINE,
-                        (struct kgd_mem **) &pm->ib_buffer_obj);
+        retval = kfd_gtt_sa_allocate(pm->dqm->dev, *rl_buffer_size,
+                        &pm->ib_buffer_obj);

         if (retval != 0) {
                 pr_err("kfd: failed to allocate runlist IB\n");
@@ -557,8 +554,7 @@ void pm_release_ib(struct packet_manager *pm)

         mutex_lock(&pm->lock);
         if (pm->allocated) {
-                kfd2kgd->free_mem(pm->dqm->dev->kgd,
-                                (struct kgd_mem *) pm->ib_buffer_obj);
+                kfd_gtt_sa_free(pm->dqm->dev, pm->ib_buffer_obj);
                 pm->allocated = false;
         }
         mutex_unlock(&pm->lock);