Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu.h')
 drivers/gpu/drm/amd/amdgpu/amdgpu.h | 81 ++++++++++++++++++++++---------
 1 file changed, 54 insertions(+), 27 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index 2fc58e658986..668939a14206 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -98,6 +98,9 @@ extern int amdgpu_sched_hw_submission;
 #define AMDGPU_MAX_COMPUTE_RINGS		8
 #define AMDGPU_MAX_VCE_RINGS			2
 
+/* max number of IP instances */
+#define AMDGPU_MAX_SDMA_INSTANCES		2
+
 /* number of hw syncs before falling back on blocking */
 #define AMDGPU_NUM_SYNCS			4
 
@@ -183,6 +186,7 @@ struct amdgpu_vm;
 struct amdgpu_ring;
 struct amdgpu_semaphore;
 struct amdgpu_cs_parser;
+struct amdgpu_job;
 struct amdgpu_irq_src;
 struct amdgpu_fpriv;
 
@@ -246,7 +250,7 @@ struct amdgpu_buffer_funcs {
 	unsigned	copy_num_dw;
 
 	/* used for buffer migration */
-	void (*emit_copy_buffer)(struct amdgpu_ring *ring,
+	void (*emit_copy_buffer)(struct amdgpu_ib *ib,
 				 /* src addr in bytes */
 				 uint64_t src_offset,
 				 /* dst addr in bytes */
@@ -261,7 +265,7 @@ struct amdgpu_buffer_funcs {
 	unsigned	fill_num_dw;
 
 	/* used for buffer clearing */
-	void (*emit_fill_buffer)(struct amdgpu_ring *ring,
+	void (*emit_fill_buffer)(struct amdgpu_ib *ib,
 				 /* value to write to memory */
 				 uint32_t src_data,
 				 /* dst addr in bytes */
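With emit_copy_buffer and emit_fill_buffer taking an amdgpu_ib instead of a ring, implementations append their packet dwords to the indirect buffer rather than writing them to the ring directly. A sketch in the shape of an SDMA copy callback; the packet macro names are assumptions for illustration, not taken from this patch:

	static void sdma_emit_copy_buffer(struct amdgpu_ib *ib,
					  uint64_t src_offset,
					  uint64_t dst_offset,
					  uint32_t byte_count)
	{
		/* append a linear copy packet to the IB */
		ib->ptr[ib->length_dw++] = SDMA_PACKET(SDMA_OPCODE_COPY,
						       SDMA_COPY_SUB_OPCODE_LINEAR, 0);
		ib->ptr[ib->length_dw++] = byte_count;
		ib->ptr[ib->length_dw++] = 0; /* src/dst endian swap */
		ib->ptr[ib->length_dw++] = lower_32_bits(src_offset);
		ib->ptr[ib->length_dw++] = upper_32_bits(src_offset);
		ib->ptr[ib->length_dw++] = lower_32_bits(dst_offset);
		ib->ptr[ib->length_dw++] = upper_32_bits(dst_offset);
	}

This is what lets buffer moves run as scheduler jobs: the packets land in an IB that can be submitted later, instead of requiring the ring at emit time.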
@@ -339,6 +343,8 @@ struct amdgpu_ring_funcs {
 	int (*test_ring)(struct amdgpu_ring *ring);
 	int (*test_ib)(struct amdgpu_ring *ring);
 	bool (*is_lockup)(struct amdgpu_ring *ring);
+	/* insert NOP packets */
+	void (*insert_nop)(struct amdgpu_ring *ring, uint32_t count);
 };
 
 /*
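The new insert_nop hook gives each ring its own way to pad the ring buffer. The generic fallback declared further down (amdgpu_ring_insert_nop) plausibly just repeats the ring's NOP packet; a minimal sketch, assuming the packet is cached in ring->nop:

	void amdgpu_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
	{
		int i;

		/* pad with 'count' copies of this ring's NOP packet */
		for (i = 0; i < count; i++)
			amdgpu_ring_write(ring, ring->nop);
	}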
@@ -440,8 +446,10 @@ int amdgpu_fence_wait_empty(struct amdgpu_ring *ring);
 unsigned amdgpu_fence_count_emitted(struct amdgpu_ring *ring);
 
 signed long amdgpu_fence_wait_any(struct amdgpu_device *adev,
-				  struct amdgpu_fence **fences,
-				  bool intr, long t);
+				  struct fence **array,
+				  uint32_t count,
+				  bool intr,
+				  signed long t);
 struct amdgpu_fence *amdgpu_fence_ref(struct amdgpu_fence *fence);
 void amdgpu_fence_unref(struct amdgpu_fence **fence);
 
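amdgpu_fence_wait_any now takes a plain fence array with an explicit count, so callers are no longer limited to amdgpu fences. An illustrative call; whether NULL slots are tolerated depends on the implementation:

	struct fence *fences[AMDGPU_MAX_RINGS];
	signed long t;

	/* gather one candidate fence per ring ... */
	t = amdgpu_fence_wait_any(adev, fences, AMDGPU_MAX_RINGS,
				  false, MAX_SCHEDULE_TIMEOUT);
	if (t < 0)
		return t;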
@@ -514,7 +522,7 @@ int amdgpu_copy_buffer(struct amdgpu_ring *ring,
 		       uint64_t dst_offset,
 		       uint32_t byte_count,
 		       struct reservation_object *resv,
-		       struct amdgpu_fence **fence);
+		       struct fence **fence);
 int amdgpu_mmap(struct file *filp, struct vm_area_struct *vma);
 
 struct amdgpu_bo_list_entry {
@@ -650,7 +658,7 @@ struct amdgpu_sa_bo {
 	struct amdgpu_sa_manager	*manager;
 	unsigned			soffset;
 	unsigned			eoffset;
-	struct amdgpu_fence		*fence;
+	struct fence			*fence;
 };
 
 /*
@@ -692,7 +700,7 @@ bool amdgpu_semaphore_emit_wait(struct amdgpu_ring *ring,
 				struct amdgpu_semaphore *semaphore);
 void amdgpu_semaphore_free(struct amdgpu_device *adev,
 			   struct amdgpu_semaphore **semaphore,
-			   struct amdgpu_fence *fence);
+			   struct fence *fence);
 
 /*
  * Synchronization
@@ -700,7 +708,8 @@ void amdgpu_semaphore_free(struct amdgpu_device *adev,
 struct amdgpu_sync {
 	struct amdgpu_semaphore *semaphores[AMDGPU_NUM_SYNCS];
 	struct amdgpu_fence	*sync_to[AMDGPU_MAX_RINGS];
-	struct amdgpu_fence	*last_vm_update;
+	DECLARE_HASHTABLE(fences, 4);
+	struct fence		*last_vm_update;
 };
 
 void amdgpu_sync_create(struct amdgpu_sync *sync);
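DECLARE_HASHTABLE(fences, 4) declares a 16-bucket table from <linux/hashtable.h>, so the sync object can collect an arbitrary number of fences instead of one slot per ring. A plausible entry type and insertion, keyed by the fence context; the entry struct is hypothetical, not part of this header:

	struct amdgpu_sync_entry {
		struct hlist_node	node;
		struct fence		*fence;
	};

	struct amdgpu_sync_entry *e = kmalloc(sizeof(*e), GFP_KERNEL);

	if (!e)
		return -ENOMEM;

	/* keying by f->context would let a later fence replace an
	 * earlier one from the same context */
	e->fence = fence_get(f);
	hash_add(sync->fences, &e->node, f->context);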
@@ -712,8 +721,10 @@ int amdgpu_sync_resv(struct amdgpu_device *adev,
 		     void *owner);
 int amdgpu_sync_rings(struct amdgpu_sync *sync,
 		      struct amdgpu_ring *ring);
+struct fence *amdgpu_sync_get_fence(struct amdgpu_sync *sync);
+int amdgpu_sync_wait(struct amdgpu_sync *sync);
 void amdgpu_sync_free(struct amdgpu_device *adev, struct amdgpu_sync *sync,
-		      struct amdgpu_fence *fence);
+		      struct fence *fence);
 
 /*
  * GART structures, functions & helpers
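Given such entries, amdgpu_sync_wait would drain the table on the CPU, while amdgpu_sync_get_fence would pop one fence at a time for the scheduler. A sketch of the wait side, reusing the hypothetical entry type from above:

	int amdgpu_sync_wait(struct amdgpu_sync *sync)
	{
		struct amdgpu_sync_entry *e;
		struct hlist_node *tmp;
		int i, r;

		/* wait on and release every fence left in the table */
		hash_for_each_safe(sync->fences, i, tmp, e, node) {
			r = fence_wait(e->fence, false);
			if (r)
				return r;

			hash_del(&e->node);
			fence_put(e->fence);
			kfree(e);
		}
		return 0;
	}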
@@ -871,7 +882,7 @@ int amdgpu_sched_ib_submit_kernel_helper(struct amdgpu_device *adev,
 					 struct amdgpu_ring *ring,
 					 struct amdgpu_ib *ibs,
 					 unsigned num_ibs,
-					 int (*free_job)(struct amdgpu_cs_parser *),
+					 int (*free_job)(struct amdgpu_job *),
 					 void *owner,
 					 struct fence **fence);
 
@@ -957,7 +968,7 @@ struct amdgpu_vm_id {
 	unsigned		id;
 	uint64_t		pd_gpu_addr;
 	/* last flushed PD/PT update */
-	struct amdgpu_fence	*flushed_updates;
+	struct fence		*flushed_updates;
 	/* last use of vmid */
 	struct amdgpu_fence	*last_id_use;
 };
@@ -1042,7 +1053,7 @@ struct amdgpu_ctx *amdgpu_ctx_get(struct amdgpu_fpriv *fpriv, uint32_t id);
 int amdgpu_ctx_put(struct amdgpu_ctx *ctx);
 
 uint64_t amdgpu_ctx_add_fence(struct amdgpu_ctx *ctx, struct amdgpu_ring *ring,
-			      struct fence *fence, uint64_t queued_seq);
+			      struct fence *fence);
 struct fence *amdgpu_ctx_get_fence(struct amdgpu_ctx *ctx,
 				   struct amdgpu_ring *ring, uint64_t seq);
 
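Dropping queued_seq means the context now assigns the sequence number itself and hands it back; the caller keeps the return value to look the fence up again later. Illustrative only:

	uint64_t seq;
	struct fence *f;

	seq = amdgpu_ctx_add_fence(ctx, ring, fence);
	/* later, from the same ring and context */
	f = amdgpu_ctx_get_fence(ctx, ring, seq);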
@@ -1078,8 +1089,6 @@ struct amdgpu_bo_list {
 };
 
 struct amdgpu_bo_list *
-amdgpu_bo_list_clone(struct amdgpu_bo_list *list);
-struct amdgpu_bo_list *
 amdgpu_bo_list_get(struct amdgpu_fpriv *fpriv, int id);
 void amdgpu_bo_list_put(struct amdgpu_bo_list *list);
 void amdgpu_bo_list_free(struct amdgpu_bo_list *list);
@@ -1210,6 +1219,7 @@ int amdgpu_ib_ring_tests(struct amdgpu_device *adev);
 void amdgpu_ring_free_size(struct amdgpu_ring *ring);
 int amdgpu_ring_alloc(struct amdgpu_ring *ring, unsigned ndw);
 int amdgpu_ring_lock(struct amdgpu_ring *ring, unsigned ndw);
+void amdgpu_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count);
 void amdgpu_ring_commit(struct amdgpu_ring *ring);
 void amdgpu_ring_unlock_commit(struct amdgpu_ring *ring);
 void amdgpu_ring_undo(struct amdgpu_ring *ring);
@@ -1255,14 +1265,16 @@ struct amdgpu_cs_parser {
 
 	/* user fence */
 	struct amdgpu_user_fence uf;
+};
 
-	struct amdgpu_ring *ring;
-	struct mutex job_lock;
-	struct work_struct job_work;
-	int (*prepare_job)(struct amdgpu_cs_parser *sched_job);
-	int (*run_job)(struct amdgpu_cs_parser *sched_job);
-	int (*free_job)(struct amdgpu_cs_parser *sched_job);
-	struct amd_sched_fence *s_fence;
+struct amdgpu_job {
+	struct amd_sched_job	base;
+	struct amdgpu_device	*adev;
+	struct amdgpu_ib	*ibs;
+	uint32_t		num_ibs;
+	struct mutex		job_lock;
+	struct amdgpu_user_fence uf;
+	int (*free_job)(struct amdgpu_job *sched_job);
 };
 
 static inline u32 amdgpu_get_ib_value(struct amdgpu_cs_parser *p, uint32_t ib_idx, int idx)
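The scheduler-facing state moves out of amdgpu_cs_parser into the new amdgpu_job, and free_job is the only per-job callback left. A minimal callback consistent with the amdgpu_vm_free_job declaration at the end of this patch; the body is a guess at the obvious cleanup, not copied from the patch:

	int amdgpu_vm_free_job(struct amdgpu_job *job)
	{
		int i;

		/* drop the IBs the job carried, then the array itself */
		for (i = 0; i < job->num_ibs; i++)
			amdgpu_ib_free(job->adev, &job->ibs[i]);
		kfree(job->ibs);
		return 0;
	}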
@@ -1659,7 +1671,6 @@ struct amdgpu_uvd {
 	struct amdgpu_bo	*vcpu_bo;
 	void			*cpu_addr;
 	uint64_t		gpu_addr;
-	void			*saved_bo;
 	atomic_t		handles[AMDGPU_MAX_UVD_HANDLES];
 	struct drm_file		*filp[AMDGPU_MAX_UVD_HANDLES];
 	struct delayed_work	idle_work;
@@ -1703,6 +1714,7 @@ struct amdgpu_sdma {
 	uint32_t		feature_version;
 
 	struct amdgpu_ring	ring;
+	bool			burst_nop;
 };
 
 /*
@@ -2051,7 +2063,7 @@ struct amdgpu_device {
 	struct amdgpu_gfx		gfx;
 
 	/* sdma */
-	struct amdgpu_sdma		sdma[2];
+	struct amdgpu_sdma		sdma[AMDGPU_MAX_SDMA_INSTANCES];
 	struct amdgpu_irq_src		sdma_trap_irq;
 	struct amdgpu_irq_src		sdma_illegal_inst_irq;
 
@@ -2190,6 +2202,21 @@ static inline void amdgpu_ring_write(struct amdgpu_ring *ring, uint32_t v)
 	ring->ring_free_dw--;
 }
 
+static inline struct amdgpu_sdma * amdgpu_get_sdma_instance(struct amdgpu_ring *ring)
+{
+	struct amdgpu_device *adev = ring->adev;
+	int i;
+
+	for (i = 0; i < AMDGPU_MAX_SDMA_INSTANCES; i++)
+		if (&adev->sdma[i].ring == ring)
+			break;
+
+	if (i < AMDGPU_MAX_SDMA_INSTANCES)
+		return &adev->sdma[i];
+	else
+		return NULL;
+}
+
 /*
  * ASICs macro.
  */
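Taken together with the new burst_nop flag, this helper points at an SDMA insert_nop that folds the whole pad into a single burst packet when the hardware supports it. A sketch only; SDMA_PKT_NOP_HEADER_COUNT is an assumed macro name:

	static void sdma_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
	{
		struct amdgpu_sdma *sdma = amdgpu_get_sdma_instance(ring);
		int i;

		for (i = 0; i < count; i++)
			if (sdma && sdma->burst_nop && (i == 0))
				/* one header NOP covering the remaining pad */
				amdgpu_ring_write(ring, ring->nop |
					SDMA_PKT_NOP_HEADER_COUNT(count - 1));
			else
				amdgpu_ring_write(ring, ring->nop);
	}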
@@ -2241,8 +2268,8 @@ static inline void amdgpu_ring_write(struct amdgpu_ring *ring, uint32_t v)
 #define amdgpu_display_add_connector(adev, ci, sd, ct, ib, coi, h, r) (adev)->mode_info.funcs->add_connector((adev), (ci), (sd), (ct), (ib), (coi), (h), (r))
 #define amdgpu_display_stop_mc_access(adev, s) (adev)->mode_info.funcs->stop_mc_access((adev), (s))
 #define amdgpu_display_resume_mc_access(adev, s) (adev)->mode_info.funcs->resume_mc_access((adev), (s))
-#define amdgpu_emit_copy_buffer(adev, r, s, d, b) (adev)->mman.buffer_funcs->emit_copy_buffer((r), (s), (d), (b))
-#define amdgpu_emit_fill_buffer(adev, r, s, d, b) (adev)->mman.buffer_funcs->emit_fill_buffer((r), (s), (d), (b))
+#define amdgpu_emit_copy_buffer(adev, ib, s, d, b) (adev)->mman.buffer_funcs->emit_copy_buffer((ib), (s), (d), (b))
+#define amdgpu_emit_fill_buffer(adev, ib, s, d, b) (adev)->mman.buffer_funcs->emit_fill_buffer((ib), (s), (d), (b))
 #define amdgpu_dpm_get_temperature(adev) (adev)->pm.funcs->get_temperature((adev))
 #define amdgpu_dpm_pre_set_power_state(adev) (adev)->pm.funcs->pre_set_power_state((adev))
 #define amdgpu_dpm_set_power_state(adev) (adev)->pm.funcs->set_power_state((adev))
@@ -2343,7 +2370,7 @@ int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
 		      struct amdgpu_sync *sync);
 void amdgpu_vm_flush(struct amdgpu_ring *ring,
 		     struct amdgpu_vm *vm,
-		     struct amdgpu_fence *updates);
+		     struct fence *updates);
 void amdgpu_vm_fence(struct amdgpu_device *adev,
 		     struct amdgpu_vm *vm,
 		     struct amdgpu_fence *fence);
@@ -2373,7 +2400,7 @@ int amdgpu_vm_bo_unmap(struct amdgpu_device *adev,
 		       uint64_t addr);
 void amdgpu_vm_bo_rmv(struct amdgpu_device *adev,
 		      struct amdgpu_bo_va *bo_va);
-
+int amdgpu_vm_free_job(struct amdgpu_job *job);
 /*
  * functions used by amdgpu_encoder.c
  */