aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/gpu/drm/amd/include/kgd_kfd_interface.h
diff options
context:
space:
mode:
Diffstat (limited to 'drivers/gpu/drm/amd/include/kgd_kfd_interface.h')
-rw-r--r--drivers/gpu/drm/amd/include/kgd_kfd_interface.h26
1 files changed, 21 insertions, 5 deletions
diff --git a/drivers/gpu/drm/amd/include/kgd_kfd_interface.h b/drivers/gpu/drm/amd/include/kgd_kfd_interface.h
index 237289a72bb7..5733fbee07f7 100644
--- a/drivers/gpu/drm/amd/include/kgd_kfd_interface.h
+++ b/drivers/gpu/drm/amd/include/kgd_kfd_interface.h
@@ -100,6 +100,21 @@ struct kgd2kfd_shared_resources {
100 /* Bit n == 1 means Queue n is available for KFD */ 100 /* Bit n == 1 means Queue n is available for KFD */
101 DECLARE_BITMAP(queue_bitmap, KGD_MAX_QUEUES); 101 DECLARE_BITMAP(queue_bitmap, KGD_MAX_QUEUES);
102 102
103 /* Doorbell assignments (SOC15 and later chips only). Only
104 * specific doorbells are routed to each SDMA engine. Others
105 * are routed to IH and VCN. They are not usable by the CP.
106 *
107 * Any doorbell number D that satisfies the following condition
108 * is reserved: (D & reserved_doorbell_mask) == reserved_doorbell_val
109 *
110 * KFD currently uses 1024 (indices 0x000-0x3ff) doorbells per process. If
111 * doorbells 0x0f0-0x0f7 and 0x2f0-0x2f7 are reserved, that means
112 * mask would be set to 0x1f8 and val set to 0x0f0.
113 */
114 unsigned int sdma_doorbell[2][2];
115 unsigned int reserved_doorbell_mask;
116 unsigned int reserved_doorbell_val;
117
103 /* Base address of doorbell aperture. */ 118 /* Base address of doorbell aperture. */
104 phys_addr_t doorbell_physical_address; 119 phys_addr_t doorbell_physical_address;
105 120
@@ -173,8 +188,6 @@ struct tile_config {
173 * @set_pasid_vmid_mapping: Exposes pasid/vmid pair to the H/W for no cp 188 * @set_pasid_vmid_mapping: Exposes pasid/vmid pair to the H/W for no cp
174 * scheduling mode. Only used for no cp scheduling mode. 189 * scheduling mode. Only used for no cp scheduling mode.
175 * 190 *
176 * @init_pipeline: Initialized the compute pipelines.
177 *
178 * @hqd_load: Loads the mqd structure to a H/W hqd slot. used only for no cp 191 * @hqd_load: Loads the mqd structure to a H/W hqd slot. used only for no cp
179 * scheduling mode. 192 * scheduling mode.
180 * 193 *
@@ -274,9 +287,6 @@ struct kfd2kgd_calls {
274 int (*set_pasid_vmid_mapping)(struct kgd_dev *kgd, unsigned int pasid, 287 int (*set_pasid_vmid_mapping)(struct kgd_dev *kgd, unsigned int pasid,
275 unsigned int vmid); 288 unsigned int vmid);
276 289
277 int (*init_pipeline)(struct kgd_dev *kgd, uint32_t pipe_id,
278 uint32_t hpd_size, uint64_t hpd_gpu_addr);
279
280 int (*init_interrupts)(struct kgd_dev *kgd, uint32_t pipe_id); 290 int (*init_interrupts)(struct kgd_dev *kgd, uint32_t pipe_id);
281 291
282 int (*hqd_load)(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id, 292 int (*hqd_load)(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id,
@@ -382,6 +392,10 @@ struct kfd2kgd_calls {
382 * 392 *
383 * @resume: Notifies amdkfd about a resume action done to a kgd device 393 * @resume: Notifies amdkfd about a resume action done to a kgd device
384 * 394 *
395 * @quiesce_mm: Quiesce all user queue access to specified MM address space
396 *
397 * @resume_mm: Resume user queue access to specified MM address space
398 *
385 * @schedule_evict_and_restore_process: Schedules work queue that will prepare 399 * @schedule_evict_and_restore_process: Schedules work queue that will prepare
386 * for safe eviction of KFD BOs that belong to the specified process. 400 * for safe eviction of KFD BOs that belong to the specified process.
387 * 401 *
@@ -399,6 +413,8 @@ struct kgd2kfd_calls {
399 void (*interrupt)(struct kfd_dev *kfd, const void *ih_ring_entry); 413 void (*interrupt)(struct kfd_dev *kfd, const void *ih_ring_entry);
400 void (*suspend)(struct kfd_dev *kfd); 414 void (*suspend)(struct kfd_dev *kfd);
401 int (*resume)(struct kfd_dev *kfd); 415 int (*resume)(struct kfd_dev *kfd);
416 int (*quiesce_mm)(struct mm_struct *mm);
417 int (*resume_mm)(struct mm_struct *mm);
402 int (*schedule_evict_and_restore_process)(struct mm_struct *mm, 418 int (*schedule_evict_and_restore_process)(struct mm_struct *mm,
403 struct dma_fence *fence); 419 struct dma_fence *fence);
404}; 420};