Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu.h')
-rw-r--r-- | drivers/gpu/drm/amd/amdgpu/amdgpu.h | 852
1 file changed, 48 insertions, 804 deletions
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index 039b57e4644c..217df2459a98 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -53,7 +53,11 @@ | |||
53 | #include "amdgpu_ucode.h" | 53 | #include "amdgpu_ucode.h" |
54 | #include "amdgpu_ttm.h" | 54 | #include "amdgpu_ttm.h" |
55 | #include "amdgpu_gds.h" | 55 | #include "amdgpu_gds.h" |
56 | #include "amdgpu_sync.h" | ||
57 | #include "amdgpu_ring.h" | ||
58 | #include "amdgpu_vm.h" | ||
56 | #include "amd_powerplay.h" | 59 | #include "amd_powerplay.h" |
60 | #include "amdgpu_dpm.h" | ||
57 | #include "amdgpu_acp.h" | 61 | #include "amdgpu_acp.h" |
58 | 62 | ||
59 | #include "gpu_scheduler.h" | 63 | #include "gpu_scheduler.h" |
@@ -97,6 +101,7 @@ extern char *amdgpu_disable_cu; | |||
97 | extern int amdgpu_sclk_deep_sleep_en; | 101 | extern int amdgpu_sclk_deep_sleep_en; |
98 | extern char *amdgpu_virtual_display; | 102 | extern char *amdgpu_virtual_display; |
99 | extern unsigned amdgpu_pp_feature_mask; | 103 | extern unsigned amdgpu_pp_feature_mask; |
104 | extern int amdgpu_vram_page_split; | ||
100 | 105 | ||
101 | #define AMDGPU_WAIT_IDLE_TIMEOUT_IN_MS 3000 | 106 | #define AMDGPU_WAIT_IDLE_TIMEOUT_IN_MS 3000 |
102 | #define AMDGPU_MAX_USEC_TIMEOUT 100000 /* 100 ms */ | 107 | #define AMDGPU_MAX_USEC_TIMEOUT 100000 /* 100 ms */ |
@@ -107,12 +112,6 @@ extern unsigned amdgpu_pp_feature_mask; | |||
107 | #define AMDGPUFB_CONN_LIMIT 4 | 112 | #define AMDGPUFB_CONN_LIMIT 4 |
108 | #define AMDGPU_BIOS_NUM_SCRATCH 8 | 113 | #define AMDGPU_BIOS_NUM_SCRATCH 8 |
109 | 114 | ||
110 | /* max number of rings */ | ||
111 | #define AMDGPU_MAX_RINGS 16 | ||
112 | #define AMDGPU_MAX_GFX_RINGS 1 | ||
113 | #define AMDGPU_MAX_COMPUTE_RINGS 8 | ||
114 | #define AMDGPU_MAX_VCE_RINGS 3 | ||
115 | |||
116 | /* max number of IP instances */ | 115 | /* max number of IP instances */ |
117 | #define AMDGPU_MAX_SDMA_INSTANCES 2 | 116 | #define AMDGPU_MAX_SDMA_INSTANCES 2 |
118 | 117 | ||
@@ -152,8 +151,6 @@ extern unsigned amdgpu_pp_feature_mask; | |||
152 | 151 | ||
153 | struct amdgpu_device; | 152 | struct amdgpu_device; |
154 | struct amdgpu_ib; | 153 | struct amdgpu_ib; |
155 | struct amdgpu_vm; | ||
156 | struct amdgpu_ring; | ||
157 | struct amdgpu_cs_parser; | 154 | struct amdgpu_cs_parser; |
158 | struct amdgpu_job; | 155 | struct amdgpu_job; |
159 | struct amdgpu_irq_src; | 156 | struct amdgpu_irq_src; |
@@ -198,21 +195,38 @@ int amdgpu_wait_for_idle(struct amdgpu_device *adev, | |||
198 | bool amdgpu_is_idle(struct amdgpu_device *adev, | 195 | bool amdgpu_is_idle(struct amdgpu_device *adev, |
199 | enum amd_ip_block_type block_type); | 196 | enum amd_ip_block_type block_type); |
200 | 197 | ||
198 | #define AMDGPU_MAX_IP_NUM 16 | ||
199 | |||
200 | struct amdgpu_ip_block_status { | ||
201 | bool valid; | ||
202 | bool sw; | ||
203 | bool hw; | ||
204 | bool late_initialized; | ||
205 | bool hang; | ||
206 | }; | ||
207 | |||
201 | struct amdgpu_ip_block_version { | 208 | struct amdgpu_ip_block_version { |
202 | enum amd_ip_block_type type; | 209 | const enum amd_ip_block_type type; |
203 | u32 major; | 210 | const u32 major; |
204 | u32 minor; | 211 | const u32 minor; |
205 | u32 rev; | 212 | const u32 rev; |
206 | const struct amd_ip_funcs *funcs; | 213 | const struct amd_ip_funcs *funcs; |
207 | }; | 214 | }; |
208 | 215 | ||
216 | struct amdgpu_ip_block { | ||
217 | struct amdgpu_ip_block_status status; | ||
218 | const struct amdgpu_ip_block_version *version; | ||
219 | }; | ||
220 | |||
209 | int amdgpu_ip_block_version_cmp(struct amdgpu_device *adev, | 221 | int amdgpu_ip_block_version_cmp(struct amdgpu_device *adev, |
210 | enum amd_ip_block_type type, | 222 | enum amd_ip_block_type type, |
211 | u32 major, u32 minor); | 223 | u32 major, u32 minor); |
212 | 224 | ||
213 | const struct amdgpu_ip_block_version * amdgpu_get_ip_block( | 225 | struct amdgpu_ip_block * amdgpu_get_ip_block(struct amdgpu_device *adev, |
214 | struct amdgpu_device *adev, | 226 | enum amd_ip_block_type type); |
215 | enum amd_ip_block_type type); | 227 | |
228 | int amdgpu_ip_block_add(struct amdgpu_device *adev, | ||
229 | const struct amdgpu_ip_block_version *ip_block_version); | ||
216 | 230 | ||
217 | /* provided by hw blocks that can move/clear data. e.g., gfx or sdma */ | 231 | /* provided by hw blocks that can move/clear data. e.g., gfx or sdma */ |
218 | struct amdgpu_buffer_funcs { | 232 | struct amdgpu_buffer_funcs { |
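The hunk above introduces per-device IP block bookkeeping: struct amdgpu_ip_block pairs a status with a const version pointer, blocks are looked up with amdgpu_get_ip_block() and registered with the new amdgpu_ip_block_add(). A minimal sketch of how such a registration helper could look against these declarations, assuming the ip_blocks[] array and num_ip_blocks counter that a later hunk adds to struct amdgpu_device (hypothetical; the real body lives outside this header and may differ):

	/* Sketch only: append one IP block version to the device's table,
	 * bounded by AMDGPU_MAX_IP_NUM as declared in this hunk. */
	int amdgpu_ip_block_add(struct amdgpu_device *adev,
				const struct amdgpu_ip_block_version *ip_block_version)
	{
		if (!ip_block_version)
			return -EINVAL;
		if (adev->num_ip_blocks >= AMDGPU_MAX_IP_NUM)
			return -ENOSPC;

		adev->ip_blocks[adev->num_ip_blocks++].version = ip_block_version;

		return 0;
	}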
@@ -286,47 +300,6 @@ struct amdgpu_ih_funcs { | |||
286 | void (*set_rptr)(struct amdgpu_device *adev); | 300 | void (*set_rptr)(struct amdgpu_device *adev); |
287 | }; | 301 | }; |
288 | 302 | ||
289 | /* provided by hw blocks that expose a ring buffer for commands */ | ||
290 | struct amdgpu_ring_funcs { | ||
291 | /* ring read/write ptr handling */ | ||
292 | u32 (*get_rptr)(struct amdgpu_ring *ring); | ||
293 | u32 (*get_wptr)(struct amdgpu_ring *ring); | ||
294 | void (*set_wptr)(struct amdgpu_ring *ring); | ||
295 | /* validating and patching of IBs */ | ||
296 | int (*parse_cs)(struct amdgpu_cs_parser *p, uint32_t ib_idx); | ||
297 | /* command emit functions */ | ||
298 | void (*emit_ib)(struct amdgpu_ring *ring, | ||
299 | struct amdgpu_ib *ib, | ||
300 | unsigned vm_id, bool ctx_switch); | ||
301 | void (*emit_fence)(struct amdgpu_ring *ring, uint64_t addr, | ||
302 | uint64_t seq, unsigned flags); | ||
303 | void (*emit_pipeline_sync)(struct amdgpu_ring *ring); | ||
304 | void (*emit_vm_flush)(struct amdgpu_ring *ring, unsigned vm_id, | ||
305 | uint64_t pd_addr); | ||
306 | void (*emit_hdp_flush)(struct amdgpu_ring *ring); | ||
307 | void (*emit_hdp_invalidate)(struct amdgpu_ring *ring); | ||
308 | void (*emit_gds_switch)(struct amdgpu_ring *ring, uint32_t vmid, | ||
309 | uint32_t gds_base, uint32_t gds_size, | ||
310 | uint32_t gws_base, uint32_t gws_size, | ||
311 | uint32_t oa_base, uint32_t oa_size); | ||
312 | /* testing functions */ | ||
313 | int (*test_ring)(struct amdgpu_ring *ring); | ||
314 | int (*test_ib)(struct amdgpu_ring *ring, long timeout); | ||
315 | /* insert NOP packets */ | ||
316 | void (*insert_nop)(struct amdgpu_ring *ring, uint32_t count); | ||
317 | /* pad the indirect buffer to the necessary number of dw */ | ||
318 | void (*pad_ib)(struct amdgpu_ring *ring, struct amdgpu_ib *ib); | ||
319 | unsigned (*init_cond_exec)(struct amdgpu_ring *ring); | ||
320 | void (*patch_cond_exec)(struct amdgpu_ring *ring, unsigned offset); | ||
321 | /* note usage for clock and power gating */ | ||
322 | void (*begin_use)(struct amdgpu_ring *ring); | ||
323 | void (*end_use)(struct amdgpu_ring *ring); | ||
324 | void (*emit_switch_buffer) (struct amdgpu_ring *ring); | ||
325 | void (*emit_cntxcntl) (struct amdgpu_ring *ring, uint32_t flags); | ||
326 | unsigned (*get_emit_ib_size) (struct amdgpu_ring *ring); | ||
327 | unsigned (*get_dma_frame_size) (struct amdgpu_ring *ring); | ||
328 | }; | ||
329 | |||
330 | /* | 303 | /* |
331 | * BIOS. | 304 | * BIOS. |
332 | */ | 305 | */ |
@@ -364,47 +337,6 @@ struct amdgpu_clock { | |||
364 | }; | 337 | }; |
365 | 338 | ||
366 | /* | 339 | /* |
367 | * Fences. | ||
368 | */ | ||
369 | struct amdgpu_fence_driver { | ||
370 | uint64_t gpu_addr; | ||
371 | volatile uint32_t *cpu_addr; | ||
372 | /* sync_seq is protected by ring emission lock */ | ||
373 | uint32_t sync_seq; | ||
374 | atomic_t last_seq; | ||
375 | bool initialized; | ||
376 | struct amdgpu_irq_src *irq_src; | ||
377 | unsigned irq_type; | ||
378 | struct timer_list fallback_timer; | ||
379 | unsigned num_fences_mask; | ||
380 | spinlock_t lock; | ||
381 | struct fence **fences; | ||
382 | }; | ||
383 | |||
384 | /* some special values for the owner field */ | ||
385 | #define AMDGPU_FENCE_OWNER_UNDEFINED ((void*)0ul) | ||
386 | #define AMDGPU_FENCE_OWNER_VM ((void*)1ul) | ||
387 | |||
388 | #define AMDGPU_FENCE_FLAG_64BIT (1 << 0) | ||
389 | #define AMDGPU_FENCE_FLAG_INT (1 << 1) | ||
390 | |||
391 | int amdgpu_fence_driver_init(struct amdgpu_device *adev); | ||
392 | void amdgpu_fence_driver_fini(struct amdgpu_device *adev); | ||
393 | void amdgpu_fence_driver_force_completion(struct amdgpu_device *adev); | ||
394 | |||
395 | int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring, | ||
396 | unsigned num_hw_submission); | ||
397 | int amdgpu_fence_driver_start_ring(struct amdgpu_ring *ring, | ||
398 | struct amdgpu_irq_src *irq_src, | ||
399 | unsigned irq_type); | ||
400 | void amdgpu_fence_driver_suspend(struct amdgpu_device *adev); | ||
401 | void amdgpu_fence_driver_resume(struct amdgpu_device *adev); | ||
402 | int amdgpu_fence_emit(struct amdgpu_ring *ring, struct fence **fence); | ||
403 | void amdgpu_fence_process(struct amdgpu_ring *ring); | ||
404 | int amdgpu_fence_wait_empty(struct amdgpu_ring *ring); | ||
405 | unsigned amdgpu_fence_count_emitted(struct amdgpu_ring *ring); | ||
406 | |||
407 | /* | ||
408 | * BO. | 340 | * BO. |
409 | */ | 341 | */ |
410 | struct amdgpu_bo_list_entry { | 342 | struct amdgpu_bo_list_entry { |
@@ -464,7 +396,6 @@ struct amdgpu_bo { | |||
464 | */ | 396 | */ |
465 | struct list_head va; | 397 | struct list_head va; |
466 | /* Constant after initialization */ | 398 | /* Constant after initialization */ |
467 | struct amdgpu_device *adev; | ||
468 | struct drm_gem_object gem_base; | 399 | struct drm_gem_object gem_base; |
469 | struct amdgpu_bo *parent; | 400 | struct amdgpu_bo *parent; |
470 | struct amdgpu_bo *shadow; | 401 | struct amdgpu_bo *shadow; |
@@ -561,27 +492,6 @@ int amdgpu_mode_dumb_create(struct drm_file *file_priv, | |||
561 | int amdgpu_mode_dumb_mmap(struct drm_file *filp, | 492 | int amdgpu_mode_dumb_mmap(struct drm_file *filp, |
562 | struct drm_device *dev, | 493 | struct drm_device *dev, |
563 | uint32_t handle, uint64_t *offset_p); | 494 | uint32_t handle, uint64_t *offset_p); |
564 | /* | ||
565 | * Synchronization | ||
566 | */ | ||
567 | struct amdgpu_sync { | ||
568 | DECLARE_HASHTABLE(fences, 4); | ||
569 | struct fence *last_vm_update; | ||
570 | }; | ||
571 | |||
572 | void amdgpu_sync_create(struct amdgpu_sync *sync); | ||
573 | int amdgpu_sync_fence(struct amdgpu_device *adev, struct amdgpu_sync *sync, | ||
574 | struct fence *f); | ||
575 | int amdgpu_sync_resv(struct amdgpu_device *adev, | ||
576 | struct amdgpu_sync *sync, | ||
577 | struct reservation_object *resv, | ||
578 | void *owner); | ||
579 | struct fence *amdgpu_sync_peek_fence(struct amdgpu_sync *sync, | ||
580 | struct amdgpu_ring *ring); | ||
581 | struct fence *amdgpu_sync_get_fence(struct amdgpu_sync *sync); | ||
582 | void amdgpu_sync_free(struct amdgpu_sync *sync); | ||
583 | int amdgpu_sync_init(void); | ||
584 | void amdgpu_sync_fini(void); | ||
585 | int amdgpu_fence_slab_init(void); | 495 | int amdgpu_fence_slab_init(void); |
586 | void amdgpu_fence_slab_fini(void); | 496 | void amdgpu_fence_slab_fini(void); |
587 | 497 | ||
@@ -723,14 +633,6 @@ struct amdgpu_ib { | |||
723 | uint32_t flags; | 633 | uint32_t flags; |
724 | }; | 634 | }; |
725 | 635 | ||
726 | enum amdgpu_ring_type { | ||
727 | AMDGPU_RING_TYPE_GFX, | ||
728 | AMDGPU_RING_TYPE_COMPUTE, | ||
729 | AMDGPU_RING_TYPE_SDMA, | ||
730 | AMDGPU_RING_TYPE_UVD, | ||
731 | AMDGPU_RING_TYPE_VCE | ||
732 | }; | ||
733 | |||
734 | extern const struct amd_sched_backend_ops amdgpu_sched_ops; | 636 | extern const struct amd_sched_backend_ops amdgpu_sched_ops; |
735 | 637 | ||
736 | int amdgpu_job_alloc(struct amdgpu_device *adev, unsigned num_ibs, | 638 | int amdgpu_job_alloc(struct amdgpu_device *adev, unsigned num_ibs, |
@@ -744,213 +646,6 @@ int amdgpu_job_submit(struct amdgpu_job *job, struct amdgpu_ring *ring, | |||
744 | struct amd_sched_entity *entity, void *owner, | 646 | struct amd_sched_entity *entity, void *owner, |
745 | struct fence **f); | 647 | struct fence **f); |
746 | 648 | ||
747 | struct amdgpu_ring { | ||
748 | struct amdgpu_device *adev; | ||
749 | const struct amdgpu_ring_funcs *funcs; | ||
750 | struct amdgpu_fence_driver fence_drv; | ||
751 | struct amd_gpu_scheduler sched; | ||
752 | |||
753 | struct amdgpu_bo *ring_obj; | ||
754 | volatile uint32_t *ring; | ||
755 | unsigned rptr_offs; | ||
756 | unsigned wptr; | ||
757 | unsigned wptr_old; | ||
758 | unsigned ring_size; | ||
759 | unsigned max_dw; | ||
760 | int count_dw; | ||
761 | uint64_t gpu_addr; | ||
762 | uint32_t align_mask; | ||
763 | uint32_t ptr_mask; | ||
764 | bool ready; | ||
765 | u32 nop; | ||
766 | u32 idx; | ||
767 | u32 me; | ||
768 | u32 pipe; | ||
769 | u32 queue; | ||
770 | struct amdgpu_bo *mqd_obj; | ||
771 | u32 doorbell_index; | ||
772 | bool use_doorbell; | ||
773 | unsigned wptr_offs; | ||
774 | unsigned fence_offs; | ||
775 | uint64_t current_ctx; | ||
776 | enum amdgpu_ring_type type; | ||
777 | char name[16]; | ||
778 | unsigned cond_exe_offs; | ||
779 | u64 cond_exe_gpu_addr; | ||
780 | volatile u32 *cond_exe_cpu_addr; | ||
781 | #if defined(CONFIG_DEBUG_FS) | ||
782 | struct dentry *ent; | ||
783 | #endif | ||
784 | }; | ||
785 | |||
786 | /* | ||
787 | * VM | ||
788 | */ | ||
789 | |||
790 | /* maximum number of VMIDs */ | ||
791 | #define AMDGPU_NUM_VM 16 | ||
792 | |||
793 | /* Maximum number of PTEs the hardware can write with one command */ | ||
794 | #define AMDGPU_VM_MAX_UPDATE_SIZE 0x3FFFF | ||
795 | |||
796 | /* number of entries in page table */ | ||
797 | #define AMDGPU_VM_PTE_COUNT (1 << amdgpu_vm_block_size) | ||
798 | |||
799 | /* PTBs (Page Table Blocks) need to be aligned to 32K */ | ||
800 | #define AMDGPU_VM_PTB_ALIGN_SIZE 32768 | ||
801 | |||
802 | /* LOG2 number of continuous pages for the fragment field */ | ||
803 | #define AMDGPU_LOG2_PAGES_PER_FRAG 4 | ||
804 | |||
805 | #define AMDGPU_PTE_VALID (1 << 0) | ||
806 | #define AMDGPU_PTE_SYSTEM (1 << 1) | ||
807 | #define AMDGPU_PTE_SNOOPED (1 << 2) | ||
808 | |||
809 | /* VI only */ | ||
810 | #define AMDGPU_PTE_EXECUTABLE (1 << 4) | ||
811 | |||
812 | #define AMDGPU_PTE_READABLE (1 << 5) | ||
813 | #define AMDGPU_PTE_WRITEABLE (1 << 6) | ||
814 | |||
815 | #define AMDGPU_PTE_FRAG(x) ((x & 0x1f) << 7) | ||
816 | |||
817 | /* How to programm VM fault handling */ | ||
818 | #define AMDGPU_VM_FAULT_STOP_NEVER 0 | ||
819 | #define AMDGPU_VM_FAULT_STOP_FIRST 1 | ||
820 | #define AMDGPU_VM_FAULT_STOP_ALWAYS 2 | ||
821 | |||
822 | struct amdgpu_vm_pt { | ||
823 | struct amdgpu_bo_list_entry entry; | ||
824 | uint64_t addr; | ||
825 | uint64_t shadow_addr; | ||
826 | }; | ||
827 | |||
828 | struct amdgpu_vm { | ||
829 | /* tree of virtual addresses mapped */ | ||
830 | struct rb_root va; | ||
831 | |||
832 | /* protecting invalidated */ | ||
833 | spinlock_t status_lock; | ||
834 | |||
835 | /* BOs moved, but not yet updated in the PT */ | ||
836 | struct list_head invalidated; | ||
837 | |||
838 | /* BOs cleared in the PT because of a move */ | ||
839 | struct list_head cleared; | ||
840 | |||
841 | /* BO mappings freed, but not yet updated in the PT */ | ||
842 | struct list_head freed; | ||
843 | |||
844 | /* contains the page directory */ | ||
845 | struct amdgpu_bo *page_directory; | ||
846 | unsigned max_pde_used; | ||
847 | struct fence *page_directory_fence; | ||
848 | uint64_t last_eviction_counter; | ||
849 | |||
850 | /* array of page tables, one for each page directory entry */ | ||
851 | struct amdgpu_vm_pt *page_tables; | ||
852 | |||
853 | /* for id and flush management per ring */ | ||
854 | struct amdgpu_vm_id *ids[AMDGPU_MAX_RINGS]; | ||
855 | |||
856 | /* protecting freed */ | ||
857 | spinlock_t freed_lock; | ||
858 | |||
859 | /* Scheduler entity for page table updates */ | ||
860 | struct amd_sched_entity entity; | ||
861 | |||
862 | /* client id */ | ||
863 | u64 client_id; | ||
864 | }; | ||
865 | |||
866 | struct amdgpu_vm_id { | ||
867 | struct list_head list; | ||
868 | struct fence *first; | ||
869 | struct amdgpu_sync active; | ||
870 | struct fence *last_flush; | ||
871 | atomic64_t owner; | ||
872 | |||
873 | uint64_t pd_gpu_addr; | ||
874 | /* last flushed PD/PT update */ | ||
875 | struct fence *flushed_updates; | ||
876 | |||
877 | uint32_t current_gpu_reset_count; | ||
878 | |||
879 | uint32_t gds_base; | ||
880 | uint32_t gds_size; | ||
881 | uint32_t gws_base; | ||
882 | uint32_t gws_size; | ||
883 | uint32_t oa_base; | ||
884 | uint32_t oa_size; | ||
885 | }; | ||
886 | |||
887 | struct amdgpu_vm_manager { | ||
888 | /* Handling of VMIDs */ | ||
889 | struct mutex lock; | ||
890 | unsigned num_ids; | ||
891 | struct list_head ids_lru; | ||
892 | struct amdgpu_vm_id ids[AMDGPU_NUM_VM]; | ||
893 | |||
894 | /* Handling of VM fences */ | ||
895 | u64 fence_context; | ||
896 | unsigned seqno[AMDGPU_MAX_RINGS]; | ||
897 | |||
898 | uint32_t max_pfn; | ||
899 | /* vram base address for page table entry */ | ||
900 | u64 vram_base_offset; | ||
901 | /* is vm enabled? */ | ||
902 | bool enabled; | ||
903 | /* vm pte handling */ | ||
904 | const struct amdgpu_vm_pte_funcs *vm_pte_funcs; | ||
905 | struct amdgpu_ring *vm_pte_rings[AMDGPU_MAX_RINGS]; | ||
906 | unsigned vm_pte_num_rings; | ||
907 | atomic_t vm_pte_next_ring; | ||
908 | /* client id counter */ | ||
909 | atomic64_t client_counter; | ||
910 | }; | ||
911 | |||
912 | void amdgpu_vm_manager_init(struct amdgpu_device *adev); | ||
913 | void amdgpu_vm_manager_fini(struct amdgpu_device *adev); | ||
914 | int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm); | ||
915 | void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm); | ||
916 | void amdgpu_vm_get_pd_bo(struct amdgpu_vm *vm, | ||
917 | struct list_head *validated, | ||
918 | struct amdgpu_bo_list_entry *entry); | ||
919 | void amdgpu_vm_get_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm, | ||
920 | struct list_head *duplicates); | ||
921 | void amdgpu_vm_move_pt_bos_in_lru(struct amdgpu_device *adev, | ||
922 | struct amdgpu_vm *vm); | ||
923 | int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring, | ||
924 | struct amdgpu_sync *sync, struct fence *fence, | ||
925 | struct amdgpu_job *job); | ||
926 | int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job); | ||
927 | void amdgpu_vm_reset_id(struct amdgpu_device *adev, unsigned vm_id); | ||
928 | int amdgpu_vm_update_page_directory(struct amdgpu_device *adev, | ||
929 | struct amdgpu_vm *vm); | ||
930 | int amdgpu_vm_clear_freed(struct amdgpu_device *adev, | ||
931 | struct amdgpu_vm *vm); | ||
932 | int amdgpu_vm_clear_invalids(struct amdgpu_device *adev, struct amdgpu_vm *vm, | ||
933 | struct amdgpu_sync *sync); | ||
934 | int amdgpu_vm_bo_update(struct amdgpu_device *adev, | ||
935 | struct amdgpu_bo_va *bo_va, | ||
936 | bool clear); | ||
937 | void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev, | ||
938 | struct amdgpu_bo *bo); | ||
939 | struct amdgpu_bo_va *amdgpu_vm_bo_find(struct amdgpu_vm *vm, | ||
940 | struct amdgpu_bo *bo); | ||
941 | struct amdgpu_bo_va *amdgpu_vm_bo_add(struct amdgpu_device *adev, | ||
942 | struct amdgpu_vm *vm, | ||
943 | struct amdgpu_bo *bo); | ||
944 | int amdgpu_vm_bo_map(struct amdgpu_device *adev, | ||
945 | struct amdgpu_bo_va *bo_va, | ||
946 | uint64_t addr, uint64_t offset, | ||
947 | uint64_t size, uint32_t flags); | ||
948 | int amdgpu_vm_bo_unmap(struct amdgpu_device *adev, | ||
949 | struct amdgpu_bo_va *bo_va, | ||
950 | uint64_t addr); | ||
951 | void amdgpu_vm_bo_rmv(struct amdgpu_device *adev, | ||
952 | struct amdgpu_bo_va *bo_va); | ||
953 | |||
954 | /* | 649 | /* |
955 | * context related structures | 650 | * context related structures |
956 | */ | 651 | */ |
@@ -1093,6 +788,16 @@ struct amdgpu_scratch { | |||
1093 | /* | 788 | /* |
1094 | * GFX configurations | 789 | * GFX configurations |
1095 | */ | 790 | */ |
791 | #define AMDGPU_GFX_MAX_SE 4 | ||
792 | #define AMDGPU_GFX_MAX_SH_PER_SE 2 | ||
793 | |||
794 | struct amdgpu_rb_config { | ||
795 | uint32_t rb_backend_disable; | ||
796 | uint32_t user_rb_backend_disable; | ||
797 | uint32_t raster_config; | ||
798 | uint32_t raster_config_1; | ||
799 | }; | ||
800 | |||
1096 | struct amdgpu_gca_config { | 801 | struct amdgpu_gca_config { |
1097 | unsigned max_shader_engines; | 802 | unsigned max_shader_engines; |
1098 | unsigned max_tile_pipes; | 803 | unsigned max_tile_pipes; |
@@ -1121,6 +826,8 @@ struct amdgpu_gca_config { | |||
1121 | 826 | ||
1122 | uint32_t tile_mode_array[32]; | 827 | uint32_t tile_mode_array[32]; |
1123 | uint32_t macrotile_mode_array[16]; | 828 | uint32_t macrotile_mode_array[16]; |
829 | |||
830 | struct amdgpu_rb_config rb_config[AMDGPU_GFX_MAX_SE][AMDGPU_GFX_MAX_SH_PER_SE]; | ||
1124 | }; | 831 | }; |
1125 | 832 | ||
1126 | struct amdgpu_cu_info { | 833 | struct amdgpu_cu_info { |
@@ -1133,6 +840,7 @@ struct amdgpu_gfx_funcs { | |||
1133 | /* get the gpu clock counter */ | 840 | /* get the gpu clock counter */ |
1134 | uint64_t (*get_gpu_clock_counter)(struct amdgpu_device *adev); | 841 | uint64_t (*get_gpu_clock_counter)(struct amdgpu_device *adev); |
1135 | void (*select_se_sh)(struct amdgpu_device *adev, u32 se_num, u32 sh_num, u32 instance); | 842 | void (*select_se_sh)(struct amdgpu_device *adev, u32 se_num, u32 sh_num, u32 instance); |
843 | void (*read_wave_data)(struct amdgpu_device *adev, uint32_t simd, uint32_t wave, uint32_t *dst, int *no_fields); | ||
1136 | }; | 844 | }; |
1137 | 845 | ||
1138 | struct amdgpu_gfx { | 846 | struct amdgpu_gfx { |
@@ -1188,16 +896,6 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs, | |||
1188 | int amdgpu_ib_pool_init(struct amdgpu_device *adev); | 896 | int amdgpu_ib_pool_init(struct amdgpu_device *adev); |
1189 | void amdgpu_ib_pool_fini(struct amdgpu_device *adev); | 897 | void amdgpu_ib_pool_fini(struct amdgpu_device *adev); |
1190 | int amdgpu_ib_ring_tests(struct amdgpu_device *adev); | 898 | int amdgpu_ib_ring_tests(struct amdgpu_device *adev); |
1191 | int amdgpu_ring_alloc(struct amdgpu_ring *ring, unsigned ndw); | ||
1192 | void amdgpu_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count); | ||
1193 | void amdgpu_ring_generic_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib); | ||
1194 | void amdgpu_ring_commit(struct amdgpu_ring *ring); | ||
1195 | void amdgpu_ring_undo(struct amdgpu_ring *ring); | ||
1196 | int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring, | ||
1197 | unsigned ring_size, u32 nop, u32 align_mask, | ||
1198 | struct amdgpu_irq_src *irq_src, unsigned irq_type, | ||
1199 | enum amdgpu_ring_type ring_type); | ||
1200 | void amdgpu_ring_fini(struct amdgpu_ring *ring); | ||
1201 | 899 | ||
1202 | /* | 900 | /* |
1203 | * CS. | 901 | * CS. |
@@ -1294,354 +992,6 @@ struct amdgpu_wb { | |||
1294 | int amdgpu_wb_get(struct amdgpu_device *adev, u32 *wb); | 992 | int amdgpu_wb_get(struct amdgpu_device *adev, u32 *wb); |
1295 | void amdgpu_wb_free(struct amdgpu_device *adev, u32 wb); | 993 | void amdgpu_wb_free(struct amdgpu_device *adev, u32 wb); |
1296 | 994 | ||
1297 | |||
1298 | |||
1299 | enum amdgpu_int_thermal_type { | ||
1300 | THERMAL_TYPE_NONE, | ||
1301 | THERMAL_TYPE_EXTERNAL, | ||
1302 | THERMAL_TYPE_EXTERNAL_GPIO, | ||
1303 | THERMAL_TYPE_RV6XX, | ||
1304 | THERMAL_TYPE_RV770, | ||
1305 | THERMAL_TYPE_ADT7473_WITH_INTERNAL, | ||
1306 | THERMAL_TYPE_EVERGREEN, | ||
1307 | THERMAL_TYPE_SUMO, | ||
1308 | THERMAL_TYPE_NI, | ||
1309 | THERMAL_TYPE_SI, | ||
1310 | THERMAL_TYPE_EMC2103_WITH_INTERNAL, | ||
1311 | THERMAL_TYPE_CI, | ||
1312 | THERMAL_TYPE_KV, | ||
1313 | }; | ||
1314 | |||
1315 | enum amdgpu_dpm_auto_throttle_src { | ||
1316 | AMDGPU_DPM_AUTO_THROTTLE_SRC_THERMAL, | ||
1317 | AMDGPU_DPM_AUTO_THROTTLE_SRC_EXTERNAL | ||
1318 | }; | ||
1319 | |||
1320 | enum amdgpu_dpm_event_src { | ||
1321 | AMDGPU_DPM_EVENT_SRC_ANALOG = 0, | ||
1322 | AMDGPU_DPM_EVENT_SRC_EXTERNAL = 1, | ||
1323 | AMDGPU_DPM_EVENT_SRC_DIGITAL = 2, | ||
1324 | AMDGPU_DPM_EVENT_SRC_ANALOG_OR_EXTERNAL = 3, | ||
1325 | AMDGPU_DPM_EVENT_SRC_DIGIAL_OR_EXTERNAL = 4 | ||
1326 | }; | ||
1327 | |||
1328 | #define AMDGPU_MAX_VCE_LEVELS 6 | ||
1329 | |||
1330 | enum amdgpu_vce_level { | ||
1331 | AMDGPU_VCE_LEVEL_AC_ALL = 0, /* AC, All cases */ | ||
1332 | AMDGPU_VCE_LEVEL_DC_EE = 1, /* DC, entropy encoding */ | ||
1333 | AMDGPU_VCE_LEVEL_DC_LL_LOW = 2, /* DC, low latency queue, res <= 720 */ | ||
1334 | AMDGPU_VCE_LEVEL_DC_LL_HIGH = 3, /* DC, low latency queue, 1080 >= res > 720 */ | ||
1335 | AMDGPU_VCE_LEVEL_DC_GP_LOW = 4, /* DC, general purpose queue, res <= 720 */ | ||
1336 | AMDGPU_VCE_LEVEL_DC_GP_HIGH = 5, /* DC, general purpose queue, 1080 >= res > 720 */ | ||
1337 | }; | ||
1338 | |||
1339 | struct amdgpu_ps { | ||
1340 | u32 caps; /* vbios flags */ | ||
1341 | u32 class; /* vbios flags */ | ||
1342 | u32 class2; /* vbios flags */ | ||
1343 | /* UVD clocks */ | ||
1344 | u32 vclk; | ||
1345 | u32 dclk; | ||
1346 | /* VCE clocks */ | ||
1347 | u32 evclk; | ||
1348 | u32 ecclk; | ||
1349 | bool vce_active; | ||
1350 | enum amdgpu_vce_level vce_level; | ||
1351 | /* asic priv */ | ||
1352 | void *ps_priv; | ||
1353 | }; | ||
1354 | |||
1355 | struct amdgpu_dpm_thermal { | ||
1356 | /* thermal interrupt work */ | ||
1357 | struct work_struct work; | ||
1358 | /* low temperature threshold */ | ||
1359 | int min_temp; | ||
1360 | /* high temperature threshold */ | ||
1361 | int max_temp; | ||
1362 | /* was last interrupt low to high or high to low */ | ||
1363 | bool high_to_low; | ||
1364 | /* interrupt source */ | ||
1365 | struct amdgpu_irq_src irq; | ||
1366 | }; | ||
1367 | |||
1368 | enum amdgpu_clk_action | ||
1369 | { | ||
1370 | AMDGPU_SCLK_UP = 1, | ||
1371 | AMDGPU_SCLK_DOWN | ||
1372 | }; | ||
1373 | |||
1374 | struct amdgpu_blacklist_clocks | ||
1375 | { | ||
1376 | u32 sclk; | ||
1377 | u32 mclk; | ||
1378 | enum amdgpu_clk_action action; | ||
1379 | }; | ||
1380 | |||
1381 | struct amdgpu_clock_and_voltage_limits { | ||
1382 | u32 sclk; | ||
1383 | u32 mclk; | ||
1384 | u16 vddc; | ||
1385 | u16 vddci; | ||
1386 | }; | ||
1387 | |||
1388 | struct amdgpu_clock_array { | ||
1389 | u32 count; | ||
1390 | u32 *values; | ||
1391 | }; | ||
1392 | |||
1393 | struct amdgpu_clock_voltage_dependency_entry { | ||
1394 | u32 clk; | ||
1395 | u16 v; | ||
1396 | }; | ||
1397 | |||
1398 | struct amdgpu_clock_voltage_dependency_table { | ||
1399 | u32 count; | ||
1400 | struct amdgpu_clock_voltage_dependency_entry *entries; | ||
1401 | }; | ||
1402 | |||
1403 | union amdgpu_cac_leakage_entry { | ||
1404 | struct { | ||
1405 | u16 vddc; | ||
1406 | u32 leakage; | ||
1407 | }; | ||
1408 | struct { | ||
1409 | u16 vddc1; | ||
1410 | u16 vddc2; | ||
1411 | u16 vddc3; | ||
1412 | }; | ||
1413 | }; | ||
1414 | |||
1415 | struct amdgpu_cac_leakage_table { | ||
1416 | u32 count; | ||
1417 | union amdgpu_cac_leakage_entry *entries; | ||
1418 | }; | ||
1419 | |||
1420 | struct amdgpu_phase_shedding_limits_entry { | ||
1421 | u16 voltage; | ||
1422 | u32 sclk; | ||
1423 | u32 mclk; | ||
1424 | }; | ||
1425 | |||
1426 | struct amdgpu_phase_shedding_limits_table { | ||
1427 | u32 count; | ||
1428 | struct amdgpu_phase_shedding_limits_entry *entries; | ||
1429 | }; | ||
1430 | |||
1431 | struct amdgpu_uvd_clock_voltage_dependency_entry { | ||
1432 | u32 vclk; | ||
1433 | u32 dclk; | ||
1434 | u16 v; | ||
1435 | }; | ||
1436 | |||
1437 | struct amdgpu_uvd_clock_voltage_dependency_table { | ||
1438 | u8 count; | ||
1439 | struct amdgpu_uvd_clock_voltage_dependency_entry *entries; | ||
1440 | }; | ||
1441 | |||
1442 | struct amdgpu_vce_clock_voltage_dependency_entry { | ||
1443 | u32 ecclk; | ||
1444 | u32 evclk; | ||
1445 | u16 v; | ||
1446 | }; | ||
1447 | |||
1448 | struct amdgpu_vce_clock_voltage_dependency_table { | ||
1449 | u8 count; | ||
1450 | struct amdgpu_vce_clock_voltage_dependency_entry *entries; | ||
1451 | }; | ||
1452 | |||
1453 | struct amdgpu_ppm_table { | ||
1454 | u8 ppm_design; | ||
1455 | u16 cpu_core_number; | ||
1456 | u32 platform_tdp; | ||
1457 | u32 small_ac_platform_tdp; | ||
1458 | u32 platform_tdc; | ||
1459 | u32 small_ac_platform_tdc; | ||
1460 | u32 apu_tdp; | ||
1461 | u32 dgpu_tdp; | ||
1462 | u32 dgpu_ulv_power; | ||
1463 | u32 tj_max; | ||
1464 | }; | ||
1465 | |||
1466 | struct amdgpu_cac_tdp_table { | ||
1467 | u16 tdp; | ||
1468 | u16 configurable_tdp; | ||
1469 | u16 tdc; | ||
1470 | u16 battery_power_limit; | ||
1471 | u16 small_power_limit; | ||
1472 | u16 low_cac_leakage; | ||
1473 | u16 high_cac_leakage; | ||
1474 | u16 maximum_power_delivery_limit; | ||
1475 | }; | ||
1476 | |||
1477 | struct amdgpu_dpm_dynamic_state { | ||
1478 | struct amdgpu_clock_voltage_dependency_table vddc_dependency_on_sclk; | ||
1479 | struct amdgpu_clock_voltage_dependency_table vddci_dependency_on_mclk; | ||
1480 | struct amdgpu_clock_voltage_dependency_table vddc_dependency_on_mclk; | ||
1481 | struct amdgpu_clock_voltage_dependency_table mvdd_dependency_on_mclk; | ||
1482 | struct amdgpu_clock_voltage_dependency_table vddc_dependency_on_dispclk; | ||
1483 | struct amdgpu_uvd_clock_voltage_dependency_table uvd_clock_voltage_dependency_table; | ||
1484 | struct amdgpu_vce_clock_voltage_dependency_table vce_clock_voltage_dependency_table; | ||
1485 | struct amdgpu_clock_voltage_dependency_table samu_clock_voltage_dependency_table; | ||
1486 | struct amdgpu_clock_voltage_dependency_table acp_clock_voltage_dependency_table; | ||
1487 | struct amdgpu_clock_voltage_dependency_table vddgfx_dependency_on_sclk; | ||
1488 | struct amdgpu_clock_array valid_sclk_values; | ||
1489 | struct amdgpu_clock_array valid_mclk_values; | ||
1490 | struct amdgpu_clock_and_voltage_limits max_clock_voltage_on_dc; | ||
1491 | struct amdgpu_clock_and_voltage_limits max_clock_voltage_on_ac; | ||
1492 | u32 mclk_sclk_ratio; | ||
1493 | u32 sclk_mclk_delta; | ||
1494 | u16 vddc_vddci_delta; | ||
1495 | u16 min_vddc_for_pcie_gen2; | ||
1496 | struct amdgpu_cac_leakage_table cac_leakage_table; | ||
1497 | struct amdgpu_phase_shedding_limits_table phase_shedding_limits_table; | ||
1498 | struct amdgpu_ppm_table *ppm_table; | ||
1499 | struct amdgpu_cac_tdp_table *cac_tdp_table; | ||
1500 | }; | ||
1501 | |||
1502 | struct amdgpu_dpm_fan { | ||
1503 | u16 t_min; | ||
1504 | u16 t_med; | ||
1505 | u16 t_high; | ||
1506 | u16 pwm_min; | ||
1507 | u16 pwm_med; | ||
1508 | u16 pwm_high; | ||
1509 | u8 t_hyst; | ||
1510 | u32 cycle_delay; | ||
1511 | u16 t_max; | ||
1512 | u8 control_mode; | ||
1513 | u16 default_max_fan_pwm; | ||
1514 | u16 default_fan_output_sensitivity; | ||
1515 | u16 fan_output_sensitivity; | ||
1516 | bool ucode_fan_control; | ||
1517 | }; | ||
1518 | |||
1519 | enum amdgpu_pcie_gen { | ||
1520 | AMDGPU_PCIE_GEN1 = 0, | ||
1521 | AMDGPU_PCIE_GEN2 = 1, | ||
1522 | AMDGPU_PCIE_GEN3 = 2, | ||
1523 | AMDGPU_PCIE_GEN_INVALID = 0xffff | ||
1524 | }; | ||
1525 | |||
1526 | enum amdgpu_dpm_forced_level { | ||
1527 | AMDGPU_DPM_FORCED_LEVEL_AUTO = 0, | ||
1528 | AMDGPU_DPM_FORCED_LEVEL_LOW = 1, | ||
1529 | AMDGPU_DPM_FORCED_LEVEL_HIGH = 2, | ||
1530 | AMDGPU_DPM_FORCED_LEVEL_MANUAL = 3, | ||
1531 | }; | ||
1532 | |||
1533 | struct amdgpu_vce_state { | ||
1534 | /* vce clocks */ | ||
1535 | u32 evclk; | ||
1536 | u32 ecclk; | ||
1537 | /* gpu clocks */ | ||
1538 | u32 sclk; | ||
1539 | u32 mclk; | ||
1540 | u8 clk_idx; | ||
1541 | u8 pstate; | ||
1542 | }; | ||
1543 | |||
1544 | struct amdgpu_dpm_funcs { | ||
1545 | int (*get_temperature)(struct amdgpu_device *adev); | ||
1546 | int (*pre_set_power_state)(struct amdgpu_device *adev); | ||
1547 | int (*set_power_state)(struct amdgpu_device *adev); | ||
1548 | void (*post_set_power_state)(struct amdgpu_device *adev); | ||
1549 | void (*display_configuration_changed)(struct amdgpu_device *adev); | ||
1550 | u32 (*get_sclk)(struct amdgpu_device *adev, bool low); | ||
1551 | u32 (*get_mclk)(struct amdgpu_device *adev, bool low); | ||
1552 | void (*print_power_state)(struct amdgpu_device *adev, struct amdgpu_ps *ps); | ||
1553 | void (*debugfs_print_current_performance_level)(struct amdgpu_device *adev, struct seq_file *m); | ||
1554 | int (*force_performance_level)(struct amdgpu_device *adev, enum amdgpu_dpm_forced_level level); | ||
1555 | bool (*vblank_too_short)(struct amdgpu_device *adev); | ||
1556 | void (*powergate_uvd)(struct amdgpu_device *adev, bool gate); | ||
1557 | void (*powergate_vce)(struct amdgpu_device *adev, bool gate); | ||
1558 | void (*enable_bapm)(struct amdgpu_device *adev, bool enable); | ||
1559 | void (*set_fan_control_mode)(struct amdgpu_device *adev, u32 mode); | ||
1560 | u32 (*get_fan_control_mode)(struct amdgpu_device *adev); | ||
1561 | int (*set_fan_speed_percent)(struct amdgpu_device *adev, u32 speed); | ||
1562 | int (*get_fan_speed_percent)(struct amdgpu_device *adev, u32 *speed); | ||
1563 | int (*force_clock_level)(struct amdgpu_device *adev, enum pp_clock_type type, uint32_t mask); | ||
1564 | int (*print_clock_levels)(struct amdgpu_device *adev, enum pp_clock_type type, char *buf); | ||
1565 | int (*get_sclk_od)(struct amdgpu_device *adev); | ||
1566 | int (*set_sclk_od)(struct amdgpu_device *adev, uint32_t value); | ||
1567 | int (*get_mclk_od)(struct amdgpu_device *adev); | ||
1568 | int (*set_mclk_od)(struct amdgpu_device *adev, uint32_t value); | ||
1569 | }; | ||
1570 | |||
1571 | struct amdgpu_dpm { | ||
1572 | struct amdgpu_ps *ps; | ||
1573 | /* number of valid power states */ | ||
1574 | int num_ps; | ||
1575 | /* current power state that is active */ | ||
1576 | struct amdgpu_ps *current_ps; | ||
1577 | /* requested power state */ | ||
1578 | struct amdgpu_ps *requested_ps; | ||
1579 | /* boot up power state */ | ||
1580 | struct amdgpu_ps *boot_ps; | ||
1581 | /* default uvd power state */ | ||
1582 | struct amdgpu_ps *uvd_ps; | ||
1583 | /* vce requirements */ | ||
1584 | struct amdgpu_vce_state vce_states[AMDGPU_MAX_VCE_LEVELS]; | ||
1585 | enum amdgpu_vce_level vce_level; | ||
1586 | enum amd_pm_state_type state; | ||
1587 | enum amd_pm_state_type user_state; | ||
1588 | u32 platform_caps; | ||
1589 | u32 voltage_response_time; | ||
1590 | u32 backbias_response_time; | ||
1591 | void *priv; | ||
1592 | u32 new_active_crtcs; | ||
1593 | int new_active_crtc_count; | ||
1594 | u32 current_active_crtcs; | ||
1595 | int current_active_crtc_count; | ||
1596 | struct amdgpu_dpm_dynamic_state dyn_state; | ||
1597 | struct amdgpu_dpm_fan fan; | ||
1598 | u32 tdp_limit; | ||
1599 | u32 near_tdp_limit; | ||
1600 | u32 near_tdp_limit_adjusted; | ||
1601 | u32 sq_ramping_threshold; | ||
1602 | u32 cac_leakage; | ||
1603 | u16 tdp_od_limit; | ||
1604 | u32 tdp_adjustment; | ||
1605 | u16 load_line_slope; | ||
1606 | bool power_control; | ||
1607 | bool ac_power; | ||
1608 | /* special states active */ | ||
1609 | bool thermal_active; | ||
1610 | bool uvd_active; | ||
1611 | bool vce_active; | ||
1612 | /* thermal handling */ | ||
1613 | struct amdgpu_dpm_thermal thermal; | ||
1614 | /* forced levels */ | ||
1615 | enum amdgpu_dpm_forced_level forced_level; | ||
1616 | }; | ||
1617 | |||
1618 | struct amdgpu_pm { | ||
1619 | struct mutex mutex; | ||
1620 | u32 current_sclk; | ||
1621 | u32 current_mclk; | ||
1622 | u32 default_sclk; | ||
1623 | u32 default_mclk; | ||
1624 | struct amdgpu_i2c_chan *i2c_bus; | ||
1625 | /* internal thermal controller on rv6xx+ */ | ||
1626 | enum amdgpu_int_thermal_type int_thermal_type; | ||
1627 | struct device *int_hwmon_dev; | ||
1628 | /* fan control parameters */ | ||
1629 | bool no_fan; | ||
1630 | u8 fan_pulses_per_revolution; | ||
1631 | u8 fan_min_rpm; | ||
1632 | u8 fan_max_rpm; | ||
1633 | /* dpm */ | ||
1634 | bool dpm_enabled; | ||
1635 | bool sysfs_initialized; | ||
1636 | struct amdgpu_dpm dpm; | ||
1637 | const struct firmware *fw; /* SMC firmware */ | ||
1638 | uint32_t fw_version; | ||
1639 | const struct amdgpu_dpm_funcs *funcs; | ||
1640 | uint32_t pcie_gen_mask; | ||
1641 | uint32_t pcie_mlw_mask; | ||
1642 | struct amd_pp_display_configuration pm_display_cfg;/* set by DAL */ | ||
1643 | }; | ||
1644 | |||
1645 | void amdgpu_get_pcie_info(struct amdgpu_device *adev); | 995 | void amdgpu_get_pcie_info(struct amdgpu_device *adev); |
1646 | 996 | ||
1647 | /* | 997 | /* |
@@ -1939,14 +1289,6 @@ typedef void (*amdgpu_wreg_t)(struct amdgpu_device*, uint32_t, uint32_t); | |||
1939 | typedef uint32_t (*amdgpu_block_rreg_t)(struct amdgpu_device*, uint32_t, uint32_t); | 1289 | typedef uint32_t (*amdgpu_block_rreg_t)(struct amdgpu_device*, uint32_t, uint32_t); |
1940 | typedef void (*amdgpu_block_wreg_t)(struct amdgpu_device*, uint32_t, uint32_t, uint32_t); | 1290 | typedef void (*amdgpu_block_wreg_t)(struct amdgpu_device*, uint32_t, uint32_t, uint32_t); |
1941 | 1291 | ||
1942 | struct amdgpu_ip_block_status { | ||
1943 | bool valid; | ||
1944 | bool sw; | ||
1945 | bool hw; | ||
1946 | bool late_initialized; | ||
1947 | bool hang; | ||
1948 | }; | ||
1949 | |||
1950 | struct amdgpu_device { | 1292 | struct amdgpu_device { |
1951 | struct device *dev; | 1293 | struct device *dev; |
1952 | struct drm_device *ddev; | 1294 | struct drm_device *ddev; |
@@ -2102,9 +1444,8 @@ struct amdgpu_device { | |||
2102 | /* GDS */ | 1444 | /* GDS */ |
2103 | struct amdgpu_gds gds; | 1445 | struct amdgpu_gds gds; |
2104 | 1446 | ||
2105 | const struct amdgpu_ip_block_version *ip_blocks; | 1447 | struct amdgpu_ip_block ip_blocks[AMDGPU_MAX_IP_NUM]; |
2106 | int num_ip_blocks; | 1448 | int num_ip_blocks; |
2107 | struct amdgpu_ip_block_status *ip_block_status; | ||
2108 | struct mutex mn_lock; | 1449 | struct mutex mn_lock; |
2109 | DECLARE_HASHTABLE(mn_hash, 7); | 1450 | DECLARE_HASHTABLE(mn_hash, 7); |
2110 | 1451 | ||
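With the two parallel arrays (the ip_blocks version pointer plus ip_block_status) collapsed into a single embedded array above, callers index one struct amdgpu_ip_block per entry. An illustrative walk over the blocks under the new layout (not taken from this patch):

	/* Illustrative usage: status now lives inside each entry and the
	 * amd_ip_funcs are reached through the embedded version pointer. */
	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		adev->ip_blocks[i].version->funcs->hw_fini((void *)adev);
	}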
@@ -2127,6 +1468,11 @@ struct amdgpu_device { | |||
2127 | 1468 | ||
2128 | }; | 1469 | }; |
2129 | 1470 | ||
1471 | static inline struct amdgpu_device *amdgpu_ttm_adev(struct ttm_bo_device *bdev) | ||
1472 | { | ||
1473 | return container_of(bdev, struct amdgpu_device, mman.bdev); | ||
1474 | } | ||
1475 | |||
2130 | bool amdgpu_device_is_px(struct drm_device *dev); | 1476 | bool amdgpu_device_is_px(struct drm_device *dev); |
2131 | int amdgpu_device_init(struct amdgpu_device *adev, | 1477 | int amdgpu_device_init(struct amdgpu_device *adev, |
2132 | struct drm_device *ddev, | 1478 | struct drm_device *ddev, |
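The amdgpu_ttm_adev() helper added above pairs with the removal of the adev back-pointer from struct amdgpu_bo earlier in this diff: the device is now derived from the TTM bo_device rather than cached in every BO. A hedged usage sketch, assuming struct amdgpu_bo still embeds its TTM object as 'tbo' (not shown in this diff):

	/* Hypothetical caller: recover the device from a BO now that
	 * bo->adev is gone. */
	static inline struct amdgpu_device *amdgpu_bo_adev(struct amdgpu_bo *bo)
	{
		return amdgpu_ttm_adev(bo->tbo.bdev);
	}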
@@ -2278,8 +1624,6 @@ amdgpu_get_sdma_instance(struct amdgpu_ring *ring) | |||
2278 | #define amdgpu_ring_pad_ib(r, ib) ((r)->funcs->pad_ib((r), (ib))) | 1624 | #define amdgpu_ring_pad_ib(r, ib) ((r)->funcs->pad_ib((r), (ib))) |
2279 | #define amdgpu_ring_init_cond_exec(r) (r)->funcs->init_cond_exec((r)) | 1625 | #define amdgpu_ring_init_cond_exec(r) (r)->funcs->init_cond_exec((r)) |
2280 | #define amdgpu_ring_patch_cond_exec(r,o) (r)->funcs->patch_cond_exec((r),(o)) | 1626 | #define amdgpu_ring_patch_cond_exec(r,o) (r)->funcs->patch_cond_exec((r),(o)) |
2281 | #define amdgpu_ring_get_emit_ib_size(r) (r)->funcs->get_emit_ib_size((r)) | ||
2282 | #define amdgpu_ring_get_dma_frame_size(r) (r)->funcs->get_dma_frame_size((r)) | ||
2283 | #define amdgpu_ih_get_wptr(adev) (adev)->irq.ih_funcs->get_wptr((adev)) | 1627 | #define amdgpu_ih_get_wptr(adev) (adev)->irq.ih_funcs->get_wptr((adev)) |
2284 | #define amdgpu_ih_decode_iv(adev, iv) (adev)->irq.ih_funcs->decode_iv((adev), (iv)) | 1628 | #define amdgpu_ih_decode_iv(adev, iv) (adev)->irq.ih_funcs->decode_iv((adev), (iv)) |
2285 | #define amdgpu_ih_set_rptr(adev) (adev)->irq.ih_funcs->set_rptr((adev)) | 1629 | #define amdgpu_ih_set_rptr(adev) (adev)->irq.ih_funcs->set_rptr((adev)) |
@@ -2301,108 +1645,8 @@ amdgpu_get_sdma_instance(struct amdgpu_ring *ring) | |||
2301 | #define amdgpu_display_resume_mc_access(adev, s) (adev)->mode_info.funcs->resume_mc_access((adev), (s)) | 1645 | #define amdgpu_display_resume_mc_access(adev, s) (adev)->mode_info.funcs->resume_mc_access((adev), (s)) |
2302 | #define amdgpu_emit_copy_buffer(adev, ib, s, d, b) (adev)->mman.buffer_funcs->emit_copy_buffer((ib), (s), (d), (b)) | 1646 | #define amdgpu_emit_copy_buffer(adev, ib, s, d, b) (adev)->mman.buffer_funcs->emit_copy_buffer((ib), (s), (d), (b)) |
2303 | #define amdgpu_emit_fill_buffer(adev, ib, s, d, b) (adev)->mman.buffer_funcs->emit_fill_buffer((ib), (s), (d), (b)) | 1647 | #define amdgpu_emit_fill_buffer(adev, ib, s, d, b) (adev)->mman.buffer_funcs->emit_fill_buffer((ib), (s), (d), (b)) |
2304 | #define amdgpu_dpm_pre_set_power_state(adev) (adev)->pm.funcs->pre_set_power_state((adev)) | ||
2305 | #define amdgpu_dpm_set_power_state(adev) (adev)->pm.funcs->set_power_state((adev)) | ||
2306 | #define amdgpu_dpm_post_set_power_state(adev) (adev)->pm.funcs->post_set_power_state((adev)) | ||
2307 | #define amdgpu_dpm_display_configuration_changed(adev) (adev)->pm.funcs->display_configuration_changed((adev)) | ||
2308 | #define amdgpu_dpm_print_power_state(adev, ps) (adev)->pm.funcs->print_power_state((adev), (ps)) | ||
2309 | #define amdgpu_dpm_vblank_too_short(adev) (adev)->pm.funcs->vblank_too_short((adev)) | ||
2310 | #define amdgpu_dpm_enable_bapm(adev, e) (adev)->pm.funcs->enable_bapm((adev), (e)) | ||
2311 | #define amdgpu_gfx_get_gpu_clock_counter(adev) (adev)->gfx.funcs->get_gpu_clock_counter((adev)) | 1648 | #define amdgpu_gfx_get_gpu_clock_counter(adev) (adev)->gfx.funcs->get_gpu_clock_counter((adev)) |
2312 | #define amdgpu_gfx_select_se_sh(adev, se, sh, instance) (adev)->gfx.funcs->select_se_sh((adev), (se), (sh), (instance)) | 1649 | #define amdgpu_gfx_select_se_sh(adev, se, sh, instance) (adev)->gfx.funcs->select_se_sh((adev), (se), (sh), (instance)) |
2313 | |||
2314 | #define amdgpu_dpm_read_sensor(adev, idx, value) \ | ||
2315 | ((adev)->pp_enabled ? \ | ||
2316 | (adev)->powerplay.pp_funcs->read_sensor(adev->powerplay.pp_handle, (idx), (value)) : \ | ||
2317 | -EINVAL) | ||
2318 | |||
2319 | #define amdgpu_dpm_get_temperature(adev) \ | ||
2320 | ((adev)->pp_enabled ? \ | ||
2321 | (adev)->powerplay.pp_funcs->get_temperature((adev)->powerplay.pp_handle) : \ | ||
2322 | (adev)->pm.funcs->get_temperature((adev))) | ||
2323 | |||
2324 | #define amdgpu_dpm_set_fan_control_mode(adev, m) \ | ||
2325 | ((adev)->pp_enabled ? \ | ||
2326 | (adev)->powerplay.pp_funcs->set_fan_control_mode((adev)->powerplay.pp_handle, (m)) : \ | ||
2327 | (adev)->pm.funcs->set_fan_control_mode((adev), (m))) | ||
2328 | |||
2329 | #define amdgpu_dpm_get_fan_control_mode(adev) \ | ||
2330 | ((adev)->pp_enabled ? \ | ||
2331 | (adev)->powerplay.pp_funcs->get_fan_control_mode((adev)->powerplay.pp_handle) : \ | ||
2332 | (adev)->pm.funcs->get_fan_control_mode((adev))) | ||
2333 | |||
2334 | #define amdgpu_dpm_set_fan_speed_percent(adev, s) \ | ||
2335 | ((adev)->pp_enabled ? \ | ||
2336 | (adev)->powerplay.pp_funcs->set_fan_speed_percent((adev)->powerplay.pp_handle, (s)) : \ | ||
2337 | (adev)->pm.funcs->set_fan_speed_percent((adev), (s))) | ||
2338 | |||
2339 | #define amdgpu_dpm_get_fan_speed_percent(adev, s) \ | ||
2340 | ((adev)->pp_enabled ? \ | ||
2341 | (adev)->powerplay.pp_funcs->get_fan_speed_percent((adev)->powerplay.pp_handle, (s)) : \ | ||
2342 | (adev)->pm.funcs->get_fan_speed_percent((adev), (s))) | ||
2343 | |||
2344 | #define amdgpu_dpm_get_sclk(adev, l) \ | ||
2345 | ((adev)->pp_enabled ? \ | ||
2346 | (adev)->powerplay.pp_funcs->get_sclk((adev)->powerplay.pp_handle, (l)) : \ | ||
2347 | (adev)->pm.funcs->get_sclk((adev), (l))) | ||
2348 | |||
2349 | #define amdgpu_dpm_get_mclk(adev, l) \ | ||
2350 | ((adev)->pp_enabled ? \ | ||
2351 | (adev)->powerplay.pp_funcs->get_mclk((adev)->powerplay.pp_handle, (l)) : \ | ||
2352 | (adev)->pm.funcs->get_mclk((adev), (l))) | ||
2353 | |||
2354 | |||
2355 | #define amdgpu_dpm_force_performance_level(adev, l) \ | ||
2356 | ((adev)->pp_enabled ? \ | ||
2357 | (adev)->powerplay.pp_funcs->force_performance_level((adev)->powerplay.pp_handle, (l)) : \ | ||
2358 | (adev)->pm.funcs->force_performance_level((adev), (l))) | ||
2359 | |||
2360 | #define amdgpu_dpm_powergate_uvd(adev, g) \ | ||
2361 | ((adev)->pp_enabled ? \ | ||
2362 | (adev)->powerplay.pp_funcs->powergate_uvd((adev)->powerplay.pp_handle, (g)) : \ | ||
2363 | (adev)->pm.funcs->powergate_uvd((adev), (g))) | ||
2364 | |||
2365 | #define amdgpu_dpm_powergate_vce(adev, g) \ | ||
2366 | ((adev)->pp_enabled ? \ | ||
2367 | (adev)->powerplay.pp_funcs->powergate_vce((adev)->powerplay.pp_handle, (g)) : \ | ||
2368 | (adev)->pm.funcs->powergate_vce((adev), (g))) | ||
2369 | |||
2370 | #define amdgpu_dpm_get_current_power_state(adev) \ | ||
2371 | (adev)->powerplay.pp_funcs->get_current_power_state((adev)->powerplay.pp_handle) | ||
2372 | |||
2373 | #define amdgpu_dpm_get_performance_level(adev) \ | ||
2374 | (adev)->powerplay.pp_funcs->get_performance_level((adev)->powerplay.pp_handle) | ||
2375 | |||
2376 | #define amdgpu_dpm_get_pp_num_states(adev, data) \ | ||
2377 | (adev)->powerplay.pp_funcs->get_pp_num_states((adev)->powerplay.pp_handle, data) | ||
2378 | |||
2379 | #define amdgpu_dpm_get_pp_table(adev, table) \ | ||
2380 | (adev)->powerplay.pp_funcs->get_pp_table((adev)->powerplay.pp_handle, table) | ||
2381 | |||
2382 | #define amdgpu_dpm_set_pp_table(adev, buf, size) \ | ||
2383 | (adev)->powerplay.pp_funcs->set_pp_table((adev)->powerplay.pp_handle, buf, size) | ||
2384 | |||
2385 | #define amdgpu_dpm_print_clock_levels(adev, type, buf) \ | ||
2386 | (adev)->powerplay.pp_funcs->print_clock_levels((adev)->powerplay.pp_handle, type, buf) | ||
2387 | |||
2388 | #define amdgpu_dpm_force_clock_level(adev, type, level) \ | ||
2389 | (adev)->powerplay.pp_funcs->force_clock_level((adev)->powerplay.pp_handle, type, level) | ||
2390 | |||
2391 | #define amdgpu_dpm_get_sclk_od(adev) \ | ||
2392 | (adev)->powerplay.pp_funcs->get_sclk_od((adev)->powerplay.pp_handle) | ||
2393 | |||
2394 | #define amdgpu_dpm_set_sclk_od(adev, value) \ | ||
2395 | (adev)->powerplay.pp_funcs->set_sclk_od((adev)->powerplay.pp_handle, value) | ||
2396 | |||
2397 | #define amdgpu_dpm_get_mclk_od(adev) \ | ||
2398 | ((adev)->powerplay.pp_funcs->get_mclk_od((adev)->powerplay.pp_handle)) | ||
2399 | |||
2400 | #define amdgpu_dpm_set_mclk_od(adev, value) \ | ||
2401 | ((adev)->powerplay.pp_funcs->set_mclk_od((adev)->powerplay.pp_handle, value)) | ||
2402 | |||
2403 | #define amdgpu_dpm_dispatch_task(adev, event_id, input, output) \ | ||
2404 | (adev)->powerplay.pp_funcs->dispatch_tasks((adev)->powerplay.pp_handle, (event_id), (input), (output)) | ||
2405 | |||
2406 | #define amdgpu_gds_switch(adev, r, v, d, w, a) (adev)->gds.funcs->patch_gds_switch((r), (v), (d), (w), (a)) | 1650 | #define amdgpu_gds_switch(adev, r, v, d, w, a) (adev)->gds.funcs->patch_gds_switch((r), (v), (d), (w), (a)) |
2407 | 1651 | ||
2408 | /* Common functions */ | 1652 | /* Common functions */ |