author		Mark Brown <broonie@kernel.org>		2015-10-12 13:09:27 -0400
committer	Mark Brown <broonie@kernel.org>		2015-10-12 13:09:27 -0400
commit		79828b4fa835f73cdaf4bffa48696abdcbea9d02 (patch)
tree		5e0fa7156acb75ba603022bc807df8f2fedb97a8 /drivers/gpu/drm/amd/amdgpu/amdgpu.h
parent		721b51fcf91898299d96f4b72cb9434cda29dce6 (diff)
parent		8c1a9d6323abf0fb1e5dad96cf3f1c783505ea5a (diff)

Merge remote-tracking branch 'asoc/fix/rt5645' into asoc-fix-rt5645

Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu.h')

-rw-r--r--	drivers/gpu/drm/amd/amdgpu/amdgpu.h	229

1 file changed, 167 insertions(+), 62 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index 01657830b470..668939a14206 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -42,17 +42,19 @@
 #include <ttm/ttm_module.h>
 #include <ttm/ttm_execbuf_util.h>
 
+#include <drm/drmP.h>
 #include <drm/drm_gem.h>
 #include <drm/amdgpu_drm.h>
 
 #include "amd_shared.h"
-#include "amdgpu_family.h"
 #include "amdgpu_mode.h"
 #include "amdgpu_ih.h"
 #include "amdgpu_irq.h"
 #include "amdgpu_ucode.h"
 #include "amdgpu_gds.h"
 
+#include "gpu_scheduler.h"
+
 /*
  * Modules parameters.
  */
@@ -77,7 +79,11 @@ extern int amdgpu_bapm;
 extern int amdgpu_deep_color;
 extern int amdgpu_vm_size;
 extern int amdgpu_vm_block_size;
+extern int amdgpu_enable_scheduler;
+extern int amdgpu_sched_jobs;
+extern int amdgpu_sched_hw_submission;
 
+#define AMDGPU_WAIT_IDLE_TIMEOUT_IN_MS		3000
 #define AMDGPU_MAX_USEC_TIMEOUT			100000	/* 100 ms */
 #define AMDGPU_FENCE_JIFFIES_TIMEOUT		(HZ / 2)
 /* AMDGPU_IB_POOL_SIZE must be a power of 2 */
@@ -92,6 +98,9 @@ extern int amdgpu_vm_block_size;
 #define AMDGPU_MAX_COMPUTE_RINGS		8
 #define AMDGPU_MAX_VCE_RINGS			2
 
+/* max number of IP instances */
+#define AMDGPU_MAX_SDMA_INSTANCES		2
+
 /* number of hw syncs before falling back on blocking */
 #define AMDGPU_NUM_SYNCS			4
 
@@ -177,7 +186,9 @@ struct amdgpu_vm;
 struct amdgpu_ring;
 struct amdgpu_semaphore;
 struct amdgpu_cs_parser;
+struct amdgpu_job;
 struct amdgpu_irq_src;
+struct amdgpu_fpriv;
 
 enum amdgpu_cp_irq {
 	AMDGPU_CP_IRQ_GFX_EOP = 0,
@@ -239,7 +250,7 @@ struct amdgpu_buffer_funcs {
 	unsigned	copy_num_dw;
 
 	/* used for buffer migration */
-	void (*emit_copy_buffer)(struct amdgpu_ring *ring,
+	void (*emit_copy_buffer)(struct amdgpu_ib *ib,
 				 /* src addr in bytes */
 				 uint64_t src_offset,
 				 /* dst addr in bytes */
@@ -254,7 +265,7 @@ struct amdgpu_buffer_funcs {
 	unsigned	fill_num_dw;
 
 	/* used for buffer clearing */
-	void (*emit_fill_buffer)(struct amdgpu_ring *ring,
+	void (*emit_fill_buffer)(struct amdgpu_ib *ib,
 				 /* value to write to memory */
 				 uint32_t src_data,
 				 /* dst addr in bytes */
@@ -332,6 +343,8 @@ struct amdgpu_ring_funcs {
 	int (*test_ring)(struct amdgpu_ring *ring);
 	int (*test_ib)(struct amdgpu_ring *ring);
 	bool (*is_lockup)(struct amdgpu_ring *ring);
+	/* insert NOP packets */
+	void (*insert_nop)(struct amdgpu_ring *ring, uint32_t count);
 };
 
 /*
@@ -381,10 +394,10 @@ struct amdgpu_fence_driver {
 	uint64_t			sync_seq[AMDGPU_MAX_RINGS];
 	atomic64_t			last_seq;
 	bool				initialized;
-	bool				delayed_irq;
 	struct amdgpu_irq_src		*irq_src;
 	unsigned			irq_type;
 	struct delayed_work		lockup_work;
+	wait_queue_head_t		fence_queue;
 };
 
 /* some special values for the owner field */
@@ -423,20 +436,20 @@ void amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring);
 int amdgpu_fence_driver_start_ring(struct amdgpu_ring *ring,
 				   struct amdgpu_irq_src *irq_src,
 				   unsigned irq_type);
+void amdgpu_fence_driver_suspend(struct amdgpu_device *adev);
+void amdgpu_fence_driver_resume(struct amdgpu_device *adev);
 int amdgpu_fence_emit(struct amdgpu_ring *ring, void *owner,
 		      struct amdgpu_fence **fence);
-int amdgpu_fence_recreate(struct amdgpu_ring *ring, void *owner,
-			  uint64_t seq, struct amdgpu_fence **fence);
 void amdgpu_fence_process(struct amdgpu_ring *ring);
 int amdgpu_fence_wait_next(struct amdgpu_ring *ring);
 int amdgpu_fence_wait_empty(struct amdgpu_ring *ring);
 unsigned amdgpu_fence_count_emitted(struct amdgpu_ring *ring);
 
-bool amdgpu_fence_signaled(struct amdgpu_fence *fence);
-int amdgpu_fence_wait(struct amdgpu_fence *fence, bool interruptible);
-int amdgpu_fence_wait_any(struct amdgpu_device *adev,
-			  struct amdgpu_fence **fences,
-			  bool intr);
+signed long amdgpu_fence_wait_any(struct amdgpu_device *adev,
+				  struct fence **array,
+				  uint32_t count,
+				  bool intr,
+				  signed long t);
 struct amdgpu_fence *amdgpu_fence_ref(struct amdgpu_fence *fence);
 void amdgpu_fence_unref(struct amdgpu_fence **fence);
 
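Note on the hunk above: the reworked amdgpu_fence_wait_any() follows the fence_wait_timeout() calling convention from linux/fence.h — it takes an array of generic struct fence pointers plus a timeout in jiffies, and returns the remaining timeout, 0 on timeout, or a negative error code. A minimal caller sketch, assuming two already-emitted fences f1 and f2 (names are illustrative, not from this patch):

	/* Sketch: wait until the first of two fences signals. */
	struct fence *fences[] = { f1, f2 };
	signed long timeout = msecs_to_jiffies(AMDGPU_WAIT_IDLE_TIMEOUT_IN_MS);
	signed long r;

	r = amdgpu_fence_wait_any(adev, fences, ARRAY_SIZE(fences),
				  true /* interruptible */, timeout);
	if (r < 0)
		return r;		/* e.g. -ERESTARTSYS on a signal */
	if (r == 0)
		return -ETIMEDOUT;	/* nothing signaled in time */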
@@ -481,7 +494,7 @@ static inline bool amdgpu_fence_is_earlier(struct amdgpu_fence *a,
 	return a->seq < b->seq;
 }
 
-int amdgpu_user_fence_emit(struct amdgpu_ring *ring, struct amdgpu_user_fence *user,
+int amdgpu_user_fence_emit(struct amdgpu_ring *ring, struct amdgpu_user_fence *user,
 			   void *owner, struct amdgpu_fence **fence);
 
 /*
@@ -509,7 +522,7 @@ int amdgpu_copy_buffer(struct amdgpu_ring *ring,
 		       uint64_t dst_offset,
 		       uint32_t byte_count,
 		       struct reservation_object *resv,
-		       struct amdgpu_fence **fence);
+		       struct fence **fence);
 int amdgpu_mmap(struct file *filp, struct vm_area_struct *vma);
 
 struct amdgpu_bo_list_entry {
@@ -532,14 +545,16 @@ struct amdgpu_bo_va_mapping {
 struct amdgpu_bo_va {
 	/* protected by bo being reserved */
 	struct list_head		bo_list;
-	uint64_t			addr;
-	struct amdgpu_fence		*last_pt_update;
+	struct fence			*last_pt_update;
 	unsigned			ref_count;
 
-	/* protected by vm mutex */
-	struct list_head		mappings;
+	/* protected by vm mutex and spinlock */
 	struct list_head		vm_status;
 
+	/* mappings for this bo_va */
+	struct list_head		invalids;
+	struct list_head		valids;
+
 	/* constant after initialization */
 	struct amdgpu_vm		*vm;
 	struct amdgpu_bo		*bo;
@@ -643,7 +658,7 @@ struct amdgpu_sa_bo {
 	struct amdgpu_sa_manager	*manager;
 	unsigned			soffset;
 	unsigned			eoffset;
-	struct amdgpu_fence		*fence;
+	struct fence			*fence;
 };
 
 /*
@@ -685,7 +700,7 @@ bool amdgpu_semaphore_emit_wait(struct amdgpu_ring *ring,
 			       struct amdgpu_semaphore *semaphore);
 void amdgpu_semaphore_free(struct amdgpu_device *adev,
 			   struct amdgpu_semaphore **semaphore,
-			   struct amdgpu_fence *fence);
+			   struct fence *fence);
 
 /*
  * Synchronization
@@ -693,20 +708,23 @@ void amdgpu_semaphore_free(struct amdgpu_device *adev,
 struct amdgpu_sync {
 	struct amdgpu_semaphore *semaphores[AMDGPU_NUM_SYNCS];
 	struct amdgpu_fence	*sync_to[AMDGPU_MAX_RINGS];
-	struct amdgpu_fence	*last_vm_update;
+	DECLARE_HASHTABLE(fences, 4);
+	struct fence		*last_vm_update;
 };
 
 void amdgpu_sync_create(struct amdgpu_sync *sync);
-void amdgpu_sync_fence(struct amdgpu_sync *sync,
-		       struct amdgpu_fence *fence);
+int amdgpu_sync_fence(struct amdgpu_device *adev, struct amdgpu_sync *sync,
+		      struct fence *f);
 int amdgpu_sync_resv(struct amdgpu_device *adev,
 		     struct amdgpu_sync *sync,
 		     struct reservation_object *resv,
 		     void *owner);
 int amdgpu_sync_rings(struct amdgpu_sync *sync,
 		      struct amdgpu_ring *ring);
+struct fence *amdgpu_sync_get_fence(struct amdgpu_sync *sync);
+int amdgpu_sync_wait(struct amdgpu_sync *sync);
 void amdgpu_sync_free(struct amdgpu_device *adev, struct amdgpu_sync *sync,
-		      struct amdgpu_fence *fence);
+		      struct fence *fence);
 
 /*
  * GART structures, functions & helpers
@@ -821,7 +839,9 @@ struct amdgpu_flip_work {
 	uint64_t			base;
 	struct drm_pending_vblank_event *event;
 	struct amdgpu_bo		*old_rbo;
-	struct fence			*fence;
+	struct fence			*excl;
+	unsigned			shared_count;
+	struct fence			**shared;
 };
 
 
@@ -844,6 +864,8 @@ struct amdgpu_ib {
 	uint32_t			gws_base, gws_size;
 	uint32_t			oa_base, oa_size;
 	uint32_t			flags;
+	/* resulting sequence number */
+	uint64_t			sequence;
 };
 
 enum amdgpu_ring_type {
@@ -854,11 +876,23 @@ enum amdgpu_ring_type {
 	AMDGPU_RING_TYPE_VCE
 };
 
+extern struct amd_sched_backend_ops amdgpu_sched_ops;
+
+int amdgpu_sched_ib_submit_kernel_helper(struct amdgpu_device *adev,
+					 struct amdgpu_ring *ring,
+					 struct amdgpu_ib *ibs,
+					 unsigned num_ibs,
+					 int (*free_job)(struct amdgpu_job *),
+					 void *owner,
+					 struct fence **fence);
+
 struct amdgpu_ring {
 	struct amdgpu_device		*adev;
	const struct amdgpu_ring_funcs	*funcs;
 	struct amdgpu_fence_driver	fence_drv;
+	struct amd_gpu_scheduler	*scheduler;
 
+	spinlock_t			fence_lock;
 	struct mutex			*ring_lock;
 	struct amdgpu_bo		*ring_obj;
 	volatile uint32_t		*ring;
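The amdgpu_sched_ib_submit_kernel_helper() declaration above is the path kernel-internal submissions take when amdgpu_enable_scheduler is set: the IBs are wrapped in an amdgpu_job, queued to the ring's amd_gpu_scheduler entity, and the caller gets back a scheduler fence. A hedged sketch of a typical call, assuming a prepared IB and using amdgpu_vm_free_job() (declared later in this diff) purely as an example cleanup callback:

	/* Sketch: submit one kernel IB through the scheduler. */
	struct fence *f;
	int r;

	r = amdgpu_sched_ib_submit_kernel_helper(adev, ring, ib, 1,
						 &amdgpu_vm_free_job,
						 AMDGPU_FENCE_OWNER_VM,
						 &f);
	if (r)
		return r;
	r = fence_wait(f, false);	/* optionally block until done */
	fence_put(f);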
@@ -892,6 +926,7 @@ struct amdgpu_ring {
 	struct amdgpu_ctx	*current_ctx;
 	enum amdgpu_ring_type	type;
 	char			name[16];
+	bool			is_pte_ring;
 };
 
 /*
@@ -933,7 +968,7 @@ struct amdgpu_vm_id {
 	unsigned		id;
 	uint64_t		pd_gpu_addr;
 	/* last flushed PD/PT update */
-	struct amdgpu_fence	*flushed_updates;
+	struct fence		*flushed_updates;
 	/* last use of vmid */
 	struct amdgpu_fence	*last_id_use;
 };
@@ -943,18 +978,22 @@ struct amdgpu_vm {
 
 	struct rb_root		va;
 
-	/* protecting invalidated and freed */
+	/* protecting invalidated */
 	spinlock_t		status_lock;
 
 	/* BOs moved, but not yet updated in the PT */
 	struct list_head	invalidated;
 
-	/* BOs freed, but not yet updated in the PT */
+	/* BOs cleared in the PT because of a move */
+	struct list_head	cleared;
+
+	/* BO mappings freed, but not yet updated in the PT */
 	struct list_head	freed;
 
 	/* contains the page directory */
 	struct amdgpu_bo	*page_directory;
 	unsigned		max_pde_used;
+	struct fence		*page_directory_fence;
 
 	/* array of page tables, one for each page directory entry */
 	struct amdgpu_vm_pt	*page_tables;
@@ -983,27 +1022,47 @@ struct amdgpu_vm_manager {
  * context related structures
  */
 
-struct amdgpu_ctx_state {
-	uint64_t flags;
-	uint32_t hangs;
+#define AMDGPU_CTX_MAX_CS_PENDING	16
+
+struct amdgpu_ctx_ring {
+	uint64_t		sequence;
+	struct fence		*fences[AMDGPU_CTX_MAX_CS_PENDING];
+	struct amd_sched_entity	entity;
 };
 
 struct amdgpu_ctx {
-	/* call kref_get()before CS start and kref_put() after CS fence signaled */
-	struct kref refcount;
-	struct amdgpu_fpriv *fpriv;
-	struct amdgpu_ctx_state state;
-	uint32_t id;
-	unsigned reset_counter;
+	struct kref		refcount;
+	struct amdgpu_device	*adev;
+	unsigned		reset_counter;
+	spinlock_t		ring_lock;
+	struct amdgpu_ctx_ring	rings[AMDGPU_MAX_RINGS];
 };
 
 struct amdgpu_ctx_mgr {
 	struct amdgpu_device	*adev;
-	struct idr		ctx_handles;
-	/* lock for IDR system */
-	struct mutex		lock;
+	struct mutex		lock;
+	/* protected by lock */
+	struct idr		ctx_handles;
 };
 
+int amdgpu_ctx_init(struct amdgpu_device *adev, bool kernel,
+		    struct amdgpu_ctx *ctx);
+void amdgpu_ctx_fini(struct amdgpu_ctx *ctx);
+
+struct amdgpu_ctx *amdgpu_ctx_get(struct amdgpu_fpriv *fpriv, uint32_t id);
+int amdgpu_ctx_put(struct amdgpu_ctx *ctx);
+
+uint64_t amdgpu_ctx_add_fence(struct amdgpu_ctx *ctx, struct amdgpu_ring *ring,
+			      struct fence *fence);
+struct fence *amdgpu_ctx_get_fence(struct amdgpu_ctx *ctx,
+				   struct amdgpu_ring *ring, uint64_t seq);
+
+int amdgpu_ctx_ioctl(struct drm_device *dev, void *data,
+		     struct drm_file *filp);
+
+void amdgpu_ctx_mgr_init(struct amdgpu_ctx_mgr *mgr);
+void amdgpu_ctx_mgr_fini(struct amdgpu_ctx_mgr *mgr);
+
 /*
  * file private structure
  */
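The fences[AMDGPU_CTX_MAX_CS_PENDING] array in amdgpu_ctx_ring above acts as a small per-ring ring buffer: each submission's fence is stored under ring_lock against a monotonically increasing sequence number, which is what the CS ioctl can hand back to userspace. A sketch of the intended pairing, using only the declarations above (error handling abbreviated; the exact return conventions are an assumption here):

	/* CS path: publish the fence, obtain a user-visible handle. */
	uint64_t seq = amdgpu_ctx_add_fence(ctx, ring, fence);

	/* WAIT_CS path: resolve the handle back into a fence. */
	struct fence *f = amdgpu_ctx_get_fence(ctx, ring, seq);
	if (!IS_ERR_OR_NULL(f)) {
		signed long r = fence_wait_timeout(f, true, timeout);

		fence_put(f);
		if (r == 0)
			return -ETIMEDOUT;
	}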
@@ -1012,7 +1071,7 @@ struct amdgpu_fpriv {
 	struct amdgpu_vm	vm;
 	struct mutex		bo_list_lock;
 	struct idr		bo_list_handles;
-	struct amdgpu_ctx_mgr ctx_mgr;
+	struct amdgpu_ctx_mgr	ctx_mgr;
 };
 
 /*
@@ -1130,6 +1189,9 @@ struct amdgpu_gfx {
 	uint32_t			me_feature_version;
 	uint32_t			ce_feature_version;
 	uint32_t			pfp_feature_version;
+	uint32_t			rlc_feature_version;
+	uint32_t			mec_feature_version;
+	uint32_t			mec2_feature_version;
 	struct amdgpu_ring		gfx_ring[AMDGPU_MAX_GFX_RINGS];
 	unsigned			num_gfx_rings;
 	struct amdgpu_ring		compute_ring[AMDGPU_MAX_COMPUTE_RINGS];
@@ -1157,6 +1219,7 @@ int amdgpu_ib_ring_tests(struct amdgpu_device *adev);
 void amdgpu_ring_free_size(struct amdgpu_ring *ring);
 int amdgpu_ring_alloc(struct amdgpu_ring *ring, unsigned ndw);
 int amdgpu_ring_lock(struct amdgpu_ring *ring, unsigned ndw);
+void amdgpu_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count);
 void amdgpu_ring_commit(struct amdgpu_ring *ring);
 void amdgpu_ring_unlock_commit(struct amdgpu_ring *ring);
 void amdgpu_ring_undo(struct amdgpu_ring *ring);
@@ -1204,6 +1267,16 @@ struct amdgpu_cs_parser {
 	struct amdgpu_user_fence uf;
 };
 
+struct amdgpu_job {
+	struct amd_sched_job	base;
+	struct amdgpu_device	*adev;
+	struct amdgpu_ib	*ibs;
+	uint32_t		num_ibs;
+	struct mutex		job_lock;
+	struct amdgpu_user_fence uf;
+	int (*free_job)(struct amdgpu_job *sched_job);
+};
+
 static inline u32 amdgpu_get_ib_value(struct amdgpu_cs_parser *p, uint32_t ib_idx, int idx)
 {
 	return p->ibs[ib_idx].ptr[idx];
@@ -1598,7 +1671,6 @@ struct amdgpu_uvd {
 	struct amdgpu_bo	*vcpu_bo;
 	void			*cpu_addr;
 	uint64_t		gpu_addr;
-	void			*saved_bo;
 	atomic_t		handles[AMDGPU_MAX_UVD_HANDLES];
 	struct drm_file		*filp[AMDGPU_MAX_UVD_HANDLES];
 	struct delayed_work	idle_work;
@@ -1614,6 +1686,9 @@ struct amdgpu_uvd {
 #define AMDGPU_MAX_VCE_HANDLES	16
 #define AMDGPU_VCE_FIRMWARE_OFFSET 256
 
+#define AMDGPU_VCE_HARVEST_VCE0	(1 << 0)
+#define AMDGPU_VCE_HARVEST_VCE1	(1 << 1)
+
 struct amdgpu_vce {
 	struct amdgpu_bo	*vcpu_bo;
 	uint64_t		gpu_addr;
@@ -1626,6 +1701,7 @@ struct amdgpu_vce {
 	const struct firmware	*fw;	/* VCE firmware */
 	struct amdgpu_ring	ring[AMDGPU_MAX_VCE_RINGS];
 	struct amdgpu_irq_src	irq;
+	unsigned		harvest_config;
 };
 
 /*
@@ -1635,8 +1711,10 @@ struct amdgpu_sdma {
 	/* SDMA firmware */
 	const struct firmware	*fw;
 	uint32_t		fw_version;
+	uint32_t		feature_version;
 
 	struct amdgpu_ring	ring;
+	bool			burst_nop;
 };
 
 /*
@@ -1841,17 +1919,12 @@ struct amdgpu_atcs {
 	struct amdgpu_atcs_functions functions;
 };
 
-int amdgpu_ctx_alloc(struct amdgpu_device *adev,struct amdgpu_fpriv *fpriv,
-						uint32_t *id,uint32_t flags);
-int amdgpu_ctx_free(struct amdgpu_device *adev, struct amdgpu_fpriv *fpriv,
-						  uint32_t id);
-
-void amdgpu_ctx_fini(struct amdgpu_fpriv *fpriv);
-struct amdgpu_ctx *amdgpu_ctx_get(struct amdgpu_fpriv *fpriv, uint32_t id);
-int amdgpu_ctx_put(struct amdgpu_ctx *ctx);
+/*
+ * CGS
+ */
+void *amdgpu_cgs_create_device(struct amdgpu_device *adev);
+void amdgpu_cgs_destroy_device(void *cgs_device);
 
-extern int amdgpu_ctx_ioctl(struct drm_device *dev, void *data,
-			    struct drm_file *filp);
 
 /*
  * Core structure, functions and helpers.
@@ -1862,6 +1935,12 @@ typedef void (*amdgpu_wreg_t)(struct amdgpu_device*, uint32_t, uint32_t);
 typedef uint32_t (*amdgpu_block_rreg_t)(struct amdgpu_device*, uint32_t, uint32_t);
 typedef void (*amdgpu_block_wreg_t)(struct amdgpu_device*, uint32_t, uint32_t, uint32_t);
 
+struct amdgpu_ip_block_status {
+	bool valid;
+	bool sw;
+	bool hw;
+};
+
 struct amdgpu_device {
 	struct device			*dev;
 	struct drm_device		*ddev;
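struct amdgpu_ip_block_status above replaces the old bool *ip_block_enabled array (see the later hunk touching amdgpu_device): instead of one flag per IP block, the device records separately whether a block is valid on this ASIC and whether its software and hardware init completed, so teardown can be exact. A sketch of the consuming pattern, modeled on how device-level fini code might walk the blocks (assumed shape, abbreviated):

	/* Sketch: tear down in reverse order, only blocks that came up. */
	int i;

	for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
		if (!adev->ip_block_status[i].hw)
			continue;
		adev->ip_blocks[i].funcs->hw_fini((void *)adev);
		adev->ip_block_status[i].hw = false;
	}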
@@ -1869,7 +1948,7 @@ struct amdgpu_device {
 	struct rw_semaphore		exclusive_lock;
 
 	/* ASIC */
-	enum amdgpu_asic_type		asic_type;
+	enum amd_asic_type		asic_type;
 	uint32_t			family;
 	uint32_t			rev_id;
 	uint32_t			external_rev_id;
@@ -1962,7 +2041,6 @@ struct amdgpu_device {
 	struct amdgpu_irq_src		hpd_irq;
 
 	/* rings */
-	wait_queue_head_t		fence_queue;
 	unsigned			fence_context;
 	struct mutex			ring_lock;
 	unsigned			num_rings;
@@ -1985,7 +2063,7 @@ struct amdgpu_device {
 	struct amdgpu_gfx		gfx;
 
 	/* sdma */
-	struct amdgpu_sdma		sdma[2];
+	struct amdgpu_sdma		sdma[AMDGPU_MAX_SDMA_INSTANCES];
 	struct amdgpu_irq_src		sdma_trap_irq;
 	struct amdgpu_irq_src		sdma_illegal_inst_irq;
 
@@ -2004,13 +2082,19 @@ struct amdgpu_device {
 
 	const struct amdgpu_ip_block_version *ip_blocks;
 	int				num_ip_blocks;
-	bool				*ip_block_enabled;
+	struct amdgpu_ip_block_status	*ip_block_status;
 	struct mutex			mn_lock;
 	DECLARE_HASHTABLE(mn_hash, 7);
 
 	/* tracking pinned memory */
 	u64 vram_pin_size;
 	u64 gart_pin_size;
+
+	/* amdkfd interface */
+	struct kfd_dev		*kfd;
+
+	/* kernel conext for IB submission */
+	struct amdgpu_ctx	kernel_ctx;
 };
 
 bool amdgpu_device_is_px(struct drm_device *dev);
@@ -2118,6 +2202,21 @@ static inline void amdgpu_ring_write(struct amdgpu_ring *ring, uint32_t v)
 	ring->ring_free_dw--;
 }
 
+static inline struct amdgpu_sdma * amdgpu_get_sdma_instance(struct amdgpu_ring *ring)
+{
+	struct amdgpu_device *adev = ring->adev;
+	int i;
+
+	for (i = 0; i < AMDGPU_MAX_SDMA_INSTANCES; i++)
+		if (&adev->sdma[i].ring == ring)
+			break;
+
+	if (i < AMDGPU_MAX_SDMA_INSTANCES)
+		return &adev->sdma[i];
+	else
+		return NULL;
+}
+
 /*
  * ASICs macro.
 */
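amdgpu_get_sdma_instance() above exists mainly to serve the new insert_nop ring callback: padding code can check the owning SDMA instance's burst_nop flag and, where the hardware supports it, emit one NOP packet carrying a count instead of a run of individual NOPs. A sketch of such a callback, assuming a ring->nop default packet and a count-bearing NOP encoding along the lines of the per-ASIC SDMA headers (the exact packet format lives there, not in this file):

	/* Sketch: pad 'count' dwords, collapsing into one burst NOP if possible. */
	static void sdma_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
	{
		struct amdgpu_sdma *sdma = amdgpu_get_sdma_instance(ring);
		uint32_t i;

		for (i = 0; i < count; i++)
			if (sdma && sdma->burst_nop && i == 0)
				/* first packet swallows the whole pad run */
				amdgpu_ring_write(ring, ring->nop |
						  SDMA_PKT_NOP_HEADER_COUNT(count - 1));
			else
				amdgpu_ring_write(ring, ring->nop);
	}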
@@ -2169,8 +2268,8 @@ static inline void amdgpu_ring_write(struct amdgpu_ring *ring, uint32_t v)
 #define amdgpu_display_add_connector(adev, ci, sd, ct, ib, coi, h, r) (adev)->mode_info.funcs->add_connector((adev), (ci), (sd), (ct), (ib), (coi), (h), (r))
 #define amdgpu_display_stop_mc_access(adev, s) (adev)->mode_info.funcs->stop_mc_access((adev), (s))
 #define amdgpu_display_resume_mc_access(adev, s) (adev)->mode_info.funcs->resume_mc_access((adev), (s))
-#define amdgpu_emit_copy_buffer(adev, r, s, d, b) (adev)->mman.buffer_funcs->emit_copy_buffer((r), (s), (d), (b))
-#define amdgpu_emit_fill_buffer(adev, r, s, d, b) (adev)->mman.buffer_funcs->emit_fill_buffer((r), (s), (d), (b))
+#define amdgpu_emit_copy_buffer(adev, ib, s, d, b) (adev)->mman.buffer_funcs->emit_copy_buffer((ib), (s), (d), (b))
+#define amdgpu_emit_fill_buffer(adev, ib, s, d, b) (adev)->mman.buffer_funcs->emit_fill_buffer((ib), (s), (d), (b))
 #define amdgpu_dpm_get_temperature(adev) (adev)->pm.funcs->get_temperature((adev))
 #define amdgpu_dpm_pre_set_power_state(adev) (adev)->pm.funcs->pre_set_power_state((adev))
 #define amdgpu_dpm_set_power_state(adev) (adev)->pm.funcs->set_power_state((adev))
@@ -2198,6 +2297,12 @@ void amdgpu_pci_config_reset(struct amdgpu_device *adev);
 bool amdgpu_card_posted(struct amdgpu_device *adev);
 void amdgpu_update_display_priority(struct amdgpu_device *adev);
 bool amdgpu_boot_test_post_card(struct amdgpu_device *adev);
+struct amdgpu_cs_parser *amdgpu_cs_parser_create(struct amdgpu_device *adev,
+						 struct drm_file *filp,
+						 struct amdgpu_ctx *ctx,
+						 struct amdgpu_ib *ibs,
+						 uint32_t num_ibs);
+
 int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data);
 int amdgpu_cs_get_ring(struct amdgpu_device *adev, u32 ip_type,
 		       u32 ip_instance, u32 ring,
@@ -2261,11 +2366,11 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm);
 struct amdgpu_bo_list_entry *amdgpu_vm_get_bos(struct amdgpu_device *adev,
 					       struct amdgpu_vm *vm,
 					       struct list_head *head);
-struct amdgpu_fence *amdgpu_vm_grab_id(struct amdgpu_ring *ring,
-				       struct amdgpu_vm *vm);
+int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
+		      struct amdgpu_sync *sync);
 void amdgpu_vm_flush(struct amdgpu_ring *ring,
 		     struct amdgpu_vm *vm,
-		     struct amdgpu_fence *updates);
+		     struct fence *updates);
 void amdgpu_vm_fence(struct amdgpu_device *adev,
 		     struct amdgpu_vm *vm,
 		     struct amdgpu_fence *fence);
@@ -2295,7 +2400,7 @@ int amdgpu_vm_bo_unmap(struct amdgpu_device *adev,
 		       uint64_t addr);
 void amdgpu_vm_bo_rmv(struct amdgpu_device *adev,
 		      struct amdgpu_bo_va *bo_va);
-
+int amdgpu_vm_free_job(struct amdgpu_job *job);
 /*
  * functions used by amdgpu_encoder.c
  */