 153 files changed, 4293 insertions(+), 4595 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/Makefile b/drivers/gpu/drm/amd/amdgpu/Makefile
index 248a05d02917..41bd2bf28f4c 100644
--- a/drivers/gpu/drm/amd/amdgpu/Makefile
+++ b/drivers/gpu/drm/amd/amdgpu/Makefile
@@ -24,7 +24,7 @@ amdgpu-y += amdgpu_device.o amdgpu_kms.o \
 	atombios_encoders.o amdgpu_sa.o atombios_i2c.o \
 	amdgpu_prime.o amdgpu_vm.o amdgpu_ib.o amdgpu_pll.o \
 	amdgpu_ucode.o amdgpu_bo_list.o amdgpu_ctx.o amdgpu_sync.o \
-	amdgpu_gtt_mgr.o
+	amdgpu_gtt_mgr.o amdgpu_vram_mgr.o
 
 # add asic specific block
 amdgpu-$(CONFIG_DRM_AMDGPU_CIK)+= cik.o cik_ih.o kv_smc.o kv_dpm.o \
diff --git a/drivers/gpu/drm/amd/amdgpu/ObjectID.h b/drivers/gpu/drm/amd/amdgpu/ObjectID.h
index b8d66670bb17..06192698bd96 100644
--- a/drivers/gpu/drm/amd/amdgpu/ObjectID.h
+++ b/drivers/gpu/drm/amd/amdgpu/ObjectID.h
@@ -90,7 +90,6 @@
 #define ENCODER_OBJECT_ID_INTERNAL_VCE 0x24
 #define ENCODER_OBJECT_ID_INTERNAL_UNIPHY3 0x25
 #define ENCODER_OBJECT_ID_INTERNAL_AMCLK 0x27
-#define ENCODER_OBJECT_ID_VIRTUAL 0x28
 
 #define ENCODER_OBJECT_ID_GENERAL_EXTERNAL_DVO 0xFF
 
@@ -120,7 +119,6 @@
 #define CONNECTOR_OBJECT_ID_eDP 0x14
 #define CONNECTOR_OBJECT_ID_MXM 0x15
 #define CONNECTOR_OBJECT_ID_LVDS_eDP 0x16
-#define CONNECTOR_OBJECT_ID_VIRTUAL 0x17
 
 /* deleted */
 
@@ -149,7 +147,6 @@
 #define GRAPH_OBJECT_ENUM_ID5 0x05
 #define GRAPH_OBJECT_ENUM_ID6 0x06
 #define GRAPH_OBJECT_ENUM_ID7 0x07
-#define GRAPH_OBJECT_ENUM_VIRTUAL 0x08
 
 /****************************************************/
 /* Graphics Object ID Bit definition */
@@ -411,10 +408,6 @@
                                                  GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
                                                  ENCODER_OBJECT_ID_HDMI_ANX9805 << OBJECT_ID_SHIFT)
 
-#define ENCODER_VIRTUAL_ENUM_VIRTUAL ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
-                                       GRAPH_OBJECT_ENUM_VIRTUAL << ENUM_ID_SHIFT |\
-                                       ENCODER_OBJECT_ID_VIRTUAL << OBJECT_ID_SHIFT)
-
 /****************************************************/
 /* Connector Object ID definition - Shared with BIOS */
 /****************************************************/
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index 039b57e4644c..217df2459a98 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -53,7 +53,11 @@
 #include "amdgpu_ucode.h"
 #include "amdgpu_ttm.h"
 #include "amdgpu_gds.h"
+#include "amdgpu_sync.h"
+#include "amdgpu_ring.h"
+#include "amdgpu_vm.h"
 #include "amd_powerplay.h"
+#include "amdgpu_dpm.h"
 #include "amdgpu_acp.h"
 
 #include "gpu_scheduler.h"
@@ -97,6 +101,7 @@ extern char *amdgpu_disable_cu;
 extern int amdgpu_sclk_deep_sleep_en;
 extern char *amdgpu_virtual_display;
 extern unsigned amdgpu_pp_feature_mask;
+extern int amdgpu_vram_page_split;
 
 #define AMDGPU_WAIT_IDLE_TIMEOUT_IN_MS 3000
 #define AMDGPU_MAX_USEC_TIMEOUT 100000 /* 100 ms */
@@ -107,12 +112,6 @@ extern unsigned amdgpu_pp_feature_mask;
 #define AMDGPUFB_CONN_LIMIT 4
 #define AMDGPU_BIOS_NUM_SCRATCH 8
 
-/* max number of rings */
-#define AMDGPU_MAX_RINGS 16
-#define AMDGPU_MAX_GFX_RINGS 1
-#define AMDGPU_MAX_COMPUTE_RINGS 8
-#define AMDGPU_MAX_VCE_RINGS 3
-
 /* max number of IP instances */
 #define AMDGPU_MAX_SDMA_INSTANCES 2
 
@@ -152,8 +151,6 @@ extern unsigned amdgpu_pp_feature_mask;
 
 struct amdgpu_device;
 struct amdgpu_ib;
-struct amdgpu_vm;
-struct amdgpu_ring;
 struct amdgpu_cs_parser;
 struct amdgpu_job;
 struct amdgpu_irq_src;
@@ -198,21 +195,38 @@ int amdgpu_wait_for_idle(struct amdgpu_device *adev,
 bool amdgpu_is_idle(struct amdgpu_device *adev,
 		    enum amd_ip_block_type block_type);
 
+#define AMDGPU_MAX_IP_NUM 16
+
+struct amdgpu_ip_block_status {
+	bool valid;
+	bool sw;
+	bool hw;
+	bool late_initialized;
+	bool hang;
+};
+
 struct amdgpu_ip_block_version {
-	enum amd_ip_block_type type;
-	u32 major;
-	u32 minor;
-	u32 rev;
+	const enum amd_ip_block_type type;
+	const u32 major;
+	const u32 minor;
+	const u32 rev;
 	const struct amd_ip_funcs *funcs;
 };
 
+struct amdgpu_ip_block {
+	struct amdgpu_ip_block_status status;
+	const struct amdgpu_ip_block_version *version;
+};
+
 int amdgpu_ip_block_version_cmp(struct amdgpu_device *adev,
 				enum amd_ip_block_type type,
 				u32 major, u32 minor);
 
-const struct amdgpu_ip_block_version * amdgpu_get_ip_block(
-					struct amdgpu_device *adev,
-					enum amd_ip_block_type type);
+struct amdgpu_ip_block * amdgpu_get_ip_block(struct amdgpu_device *adev,
+					     enum amd_ip_block_type type);
+
+int amdgpu_ip_block_add(struct amdgpu_device *adev,
+			const struct amdgpu_ip_block_version *ip_block_version);
 
 /* provided by hw blocks that can move/clear data. e.g., gfx or sdma */
 struct amdgpu_buffer_funcs {
@@ -286,47 +300,6 @@ struct amdgpu_ih_funcs {
 	void (*set_rptr)(struct amdgpu_device *adev);
 };
 
-/* provided by hw blocks that expose a ring buffer for commands */
-struct amdgpu_ring_funcs {
-	/* ring read/write ptr handling */
-	u32 (*get_rptr)(struct amdgpu_ring *ring);
-	u32 (*get_wptr)(struct amdgpu_ring *ring);
-	void (*set_wptr)(struct amdgpu_ring *ring);
-	/* validating and patching of IBs */
-	int (*parse_cs)(struct amdgpu_cs_parser *p, uint32_t ib_idx);
-	/* command emit functions */
-	void (*emit_ib)(struct amdgpu_ring *ring,
-			struct amdgpu_ib *ib,
-			unsigned vm_id, bool ctx_switch);
-	void (*emit_fence)(struct amdgpu_ring *ring, uint64_t addr,
-			   uint64_t seq, unsigned flags);
-	void (*emit_pipeline_sync)(struct amdgpu_ring *ring);
-	void (*emit_vm_flush)(struct amdgpu_ring *ring, unsigned vm_id,
-			      uint64_t pd_addr);
-	void (*emit_hdp_flush)(struct amdgpu_ring *ring);
-	void (*emit_hdp_invalidate)(struct amdgpu_ring *ring);
-	void (*emit_gds_switch)(struct amdgpu_ring *ring, uint32_t vmid,
-				uint32_t gds_base, uint32_t gds_size,
-				uint32_t gws_base, uint32_t gws_size,
-				uint32_t oa_base, uint32_t oa_size);
-	/* testing functions */
-	int (*test_ring)(struct amdgpu_ring *ring);
-	int (*test_ib)(struct amdgpu_ring *ring, long timeout);
-	/* insert NOP packets */
-	void (*insert_nop)(struct amdgpu_ring *ring, uint32_t count);
-	/* pad the indirect buffer to the necessary number of dw */
-	void (*pad_ib)(struct amdgpu_ring *ring, struct amdgpu_ib *ib);
-	unsigned (*init_cond_exec)(struct amdgpu_ring *ring);
-	void (*patch_cond_exec)(struct amdgpu_ring *ring, unsigned offset);
-	/* note usage for clock and power gating */
-	void (*begin_use)(struct amdgpu_ring *ring);
-	void (*end_use)(struct amdgpu_ring *ring);
-	void (*emit_switch_buffer) (struct amdgpu_ring *ring);
-	void (*emit_cntxcntl) (struct amdgpu_ring *ring, uint32_t flags);
-	unsigned (*get_emit_ib_size) (struct amdgpu_ring *ring);
-	unsigned (*get_dma_frame_size) (struct amdgpu_ring *ring);
-};
-
 /*
  * BIOS.
  */
@@ -364,47 +337,6 @@ struct amdgpu_clock {
 };
 
 /*
- * Fences.
- */
-struct amdgpu_fence_driver {
-	uint64_t gpu_addr;
-	volatile uint32_t *cpu_addr;
-	/* sync_seq is protected by ring emission lock */
-	uint32_t sync_seq;
-	atomic_t last_seq;
-	bool initialized;
-	struct amdgpu_irq_src *irq_src;
-	unsigned irq_type;
-	struct timer_list fallback_timer;
-	unsigned num_fences_mask;
-	spinlock_t lock;
-	struct fence **fences;
-};
-
-/* some special values for the owner field */
-#define AMDGPU_FENCE_OWNER_UNDEFINED ((void*)0ul)
-#define AMDGPU_FENCE_OWNER_VM ((void*)1ul)
-
-#define AMDGPU_FENCE_FLAG_64BIT (1 << 0)
-#define AMDGPU_FENCE_FLAG_INT (1 << 1)
-
-int amdgpu_fence_driver_init(struct amdgpu_device *adev);
-void amdgpu_fence_driver_fini(struct amdgpu_device *adev);
-void amdgpu_fence_driver_force_completion(struct amdgpu_device *adev);
-
-int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring,
-				  unsigned num_hw_submission);
-int amdgpu_fence_driver_start_ring(struct amdgpu_ring *ring,
-				   struct amdgpu_irq_src *irq_src,
-				   unsigned irq_type);
-void amdgpu_fence_driver_suspend(struct amdgpu_device *adev);
-void amdgpu_fence_driver_resume(struct amdgpu_device *adev);
-int amdgpu_fence_emit(struct amdgpu_ring *ring, struct fence **fence);
-void amdgpu_fence_process(struct amdgpu_ring *ring);
-int amdgpu_fence_wait_empty(struct amdgpu_ring *ring);
-unsigned amdgpu_fence_count_emitted(struct amdgpu_ring *ring);
-
-/*
  * BO.
  */
 struct amdgpu_bo_list_entry {
@@ -464,7 +396,6 @@ struct amdgpu_bo {
 	 */
 	struct list_head va;
 	/* Constant after initialization */
-	struct amdgpu_device *adev;
 	struct drm_gem_object gem_base;
 	struct amdgpu_bo *parent;
 	struct amdgpu_bo *shadow;
@@ -561,27 +492,6 @@ int amdgpu_mode_dumb_create(struct drm_file *file_priv,
 int amdgpu_mode_dumb_mmap(struct drm_file *filp,
 			  struct drm_device *dev,
 			  uint32_t handle, uint64_t *offset_p);
-/*
- * Synchronization
- */
-struct amdgpu_sync {
-	DECLARE_HASHTABLE(fences, 4);
-	struct fence *last_vm_update;
-};
-
-void amdgpu_sync_create(struct amdgpu_sync *sync);
-int amdgpu_sync_fence(struct amdgpu_device *adev, struct amdgpu_sync *sync,
-		      struct fence *f);
-int amdgpu_sync_resv(struct amdgpu_device *adev,
-		     struct amdgpu_sync *sync,
-		     struct reservation_object *resv,
-		     void *owner);
-struct fence *amdgpu_sync_peek_fence(struct amdgpu_sync *sync,
-				     struct amdgpu_ring *ring);
-struct fence *amdgpu_sync_get_fence(struct amdgpu_sync *sync);
-void amdgpu_sync_free(struct amdgpu_sync *sync);
-int amdgpu_sync_init(void);
-void amdgpu_sync_fini(void);
 int amdgpu_fence_slab_init(void);
 void amdgpu_fence_slab_fini(void);
 
@@ -723,14 +633,6 @@ struct amdgpu_ib {
 	uint32_t flags;
 };
 
-enum amdgpu_ring_type {
-	AMDGPU_RING_TYPE_GFX,
-	AMDGPU_RING_TYPE_COMPUTE,
-	AMDGPU_RING_TYPE_SDMA,
-	AMDGPU_RING_TYPE_UVD,
-	AMDGPU_RING_TYPE_VCE
-};
-
 extern const struct amd_sched_backend_ops amdgpu_sched_ops;
 
 int amdgpu_job_alloc(struct amdgpu_device *adev, unsigned num_ibs,
@@ -744,213 +646,6 @@ int amdgpu_job_submit(struct amdgpu_job *job, struct amdgpu_ring *ring,
 		      struct amd_sched_entity *entity, void *owner,
 		      struct fence **f);
 
-struct amdgpu_ring {
-	struct amdgpu_device *adev;
-	const struct amdgpu_ring_funcs *funcs;
-	struct amdgpu_fence_driver fence_drv;
-	struct amd_gpu_scheduler sched;
-
-	struct amdgpu_bo *ring_obj;
-	volatile uint32_t *ring;
-	unsigned rptr_offs;
-	unsigned wptr;
-	unsigned wptr_old;
-	unsigned ring_size;
-	unsigned max_dw;
-	int count_dw;
-	uint64_t gpu_addr;
-	uint32_t align_mask;
-	uint32_t ptr_mask;
-	bool ready;
-	u32 nop;
-	u32 idx;
-	u32 me;
-	u32 pipe;
-	u32 queue;
-	struct amdgpu_bo *mqd_obj;
-	u32 doorbell_index;
-	bool use_doorbell;
-	unsigned wptr_offs;
-	unsigned fence_offs;
-	uint64_t current_ctx;
-	enum amdgpu_ring_type type;
-	char name[16];
-	unsigned cond_exe_offs;
-	u64 cond_exe_gpu_addr;
-	volatile u32 *cond_exe_cpu_addr;
-#if defined(CONFIG_DEBUG_FS)
-	struct dentry *ent;
-#endif
-};
-
-/*
- * VM
- */
-
-/* maximum number of VMIDs */
-#define AMDGPU_NUM_VM 16
-
-/* Maximum number of PTEs the hardware can write with one command */
-#define AMDGPU_VM_MAX_UPDATE_SIZE 0x3FFFF
-
-/* number of entries in page table */
-#define AMDGPU_VM_PTE_COUNT (1 << amdgpu_vm_block_size)
-
-/* PTBs (Page Table Blocks) need to be aligned to 32K */
-#define AMDGPU_VM_PTB_ALIGN_SIZE 32768
-
-/* LOG2 number of continuous pages for the fragment field */
-#define AMDGPU_LOG2_PAGES_PER_FRAG 4
-
-#define AMDGPU_PTE_VALID (1 << 0)
-#define AMDGPU_PTE_SYSTEM (1 << 1)
-#define AMDGPU_PTE_SNOOPED (1 << 2)
-
-/* VI only */
-#define AMDGPU_PTE_EXECUTABLE (1 << 4)
-
-#define AMDGPU_PTE_READABLE (1 << 5)
-#define AMDGPU_PTE_WRITEABLE (1 << 6)
-
-#define AMDGPU_PTE_FRAG(x) ((x & 0x1f) << 7)
-
-/* How to programm VM fault handling */
-#define AMDGPU_VM_FAULT_STOP_NEVER 0
-#define AMDGPU_VM_FAULT_STOP_FIRST 1
-#define AMDGPU_VM_FAULT_STOP_ALWAYS 2
-
-struct amdgpu_vm_pt {
-	struct amdgpu_bo_list_entry entry;
-	uint64_t addr;
-	uint64_t shadow_addr;
-};
-
-struct amdgpu_vm {
-	/* tree of virtual addresses mapped */
-	struct rb_root va;
-
-	/* protecting invalidated */
-	spinlock_t status_lock;
-
-	/* BOs moved, but not yet updated in the PT */
-	struct list_head invalidated;
-
-	/* BOs cleared in the PT because of a move */
-	struct list_head cleared;
-
-	/* BO mappings freed, but not yet updated in the PT */
-	struct list_head freed;
-
-	/* contains the page directory */
-	struct amdgpu_bo *page_directory;
-	unsigned max_pde_used;
-	struct fence *page_directory_fence;
-	uint64_t last_eviction_counter;
-
-	/* array of page tables, one for each page directory entry */
-	struct amdgpu_vm_pt *page_tables;
-
-	/* for id and flush management per ring */
-	struct amdgpu_vm_id *ids[AMDGPU_MAX_RINGS];
-
-	/* protecting freed */
-	spinlock_t freed_lock;
-
-	/* Scheduler entity for page table updates */
-	struct amd_sched_entity entity;
-
-	/* client id */
-	u64 client_id;
-};
-
-struct amdgpu_vm_id {
-	struct list_head list;
-	struct fence *first;
-	struct amdgpu_sync active;
-	struct fence *last_flush;
-	atomic64_t owner;
-
-	uint64_t pd_gpu_addr;
-	/* last flushed PD/PT update */
-	struct fence *flushed_updates;
-
-	uint32_t current_gpu_reset_count;
-
-	uint32_t gds_base;
-	uint32_t gds_size;
-	uint32_t gws_base;
-	uint32_t gws_size;
-	uint32_t oa_base;
-	uint32_t oa_size;
-};
-
-struct amdgpu_vm_manager {
-	/* Handling of VMIDs */
-	struct mutex lock;
-	unsigned num_ids;
-	struct list_head ids_lru;
-	struct amdgpu_vm_id ids[AMDGPU_NUM_VM];
-
-	/* Handling of VM fences */
-	u64 fence_context;
-	unsigned seqno[AMDGPU_MAX_RINGS];
-
-	uint32_t max_pfn;
-	/* vram base address for page table entry */
-	u64 vram_base_offset;
-	/* is vm enabled? */
-	bool enabled;
-	/* vm pte handling */
-	const struct amdgpu_vm_pte_funcs *vm_pte_funcs;
-	struct amdgpu_ring *vm_pte_rings[AMDGPU_MAX_RINGS];
-	unsigned vm_pte_num_rings;
-	atomic_t vm_pte_next_ring;
-	/* client id counter */
-	atomic64_t client_counter;
-};
-
-void amdgpu_vm_manager_init(struct amdgpu_device *adev);
-void amdgpu_vm_manager_fini(struct amdgpu_device *adev);
-int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm);
-void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm);
-void amdgpu_vm_get_pd_bo(struct amdgpu_vm *vm,
-			 struct list_head *validated,
-			 struct amdgpu_bo_list_entry *entry);
-void amdgpu_vm_get_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm,
-			  struct list_head *duplicates);
-void amdgpu_vm_move_pt_bos_in_lru(struct amdgpu_device *adev,
-				  struct amdgpu_vm *vm);
-int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
-		      struct amdgpu_sync *sync, struct fence *fence,
-		      struct amdgpu_job *job);
-int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job);
-void amdgpu_vm_reset_id(struct amdgpu_device *adev, unsigned vm_id);
-int amdgpu_vm_update_page_directory(struct amdgpu_device *adev,
-				    struct amdgpu_vm *vm);
-int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
-			  struct amdgpu_vm *vm);
-int amdgpu_vm_clear_invalids(struct amdgpu_device *adev, struct amdgpu_vm *vm,
-			     struct amdgpu_sync *sync);
-int amdgpu_vm_bo_update(struct amdgpu_device *adev,
-			struct amdgpu_bo_va *bo_va,
-			bool clear);
-void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev,
-			     struct amdgpu_bo *bo);
-struct amdgpu_bo_va *amdgpu_vm_bo_find(struct amdgpu_vm *vm,
-				       struct amdgpu_bo *bo);
-struct amdgpu_bo_va *amdgpu_vm_bo_add(struct amdgpu_device *adev,
-				      struct amdgpu_vm *vm,
-				      struct amdgpu_bo *bo);
-int amdgpu_vm_bo_map(struct amdgpu_device *adev,
-		     struct amdgpu_bo_va *bo_va,
-		     uint64_t addr, uint64_t offset,
-		     uint64_t size, uint32_t flags);
-int amdgpu_vm_bo_unmap(struct amdgpu_device *adev,
-		       struct amdgpu_bo_va *bo_va,
-		       uint64_t addr);
-void amdgpu_vm_bo_rmv(struct amdgpu_device *adev,
-		      struct amdgpu_bo_va *bo_va);
-
 /*
  * context related structures
  */
@@ -1093,6 +788,16 @@ struct amdgpu_scratch {
 /*
  * GFX configurations
  */
+#define AMDGPU_GFX_MAX_SE 4
+#define AMDGPU_GFX_MAX_SH_PER_SE 2
+
+struct amdgpu_rb_config {
+	uint32_t rb_backend_disable;
+	uint32_t user_rb_backend_disable;
+	uint32_t raster_config;
+	uint32_t raster_config_1;
+};
+
 struct amdgpu_gca_config {
 	unsigned max_shader_engines;
 	unsigned max_tile_pipes;
@@ -1121,6 +826,8 @@ struct amdgpu_gca_config {
 
 	uint32_t tile_mode_array[32];
 	uint32_t macrotile_mode_array[16];
+
+	struct amdgpu_rb_config rb_config[AMDGPU_GFX_MAX_SE][AMDGPU_GFX_MAX_SH_PER_SE];
 };
 
 struct amdgpu_cu_info {
@@ -1133,6 +840,7 @@ struct amdgpu_gfx_funcs {
 	/* get the gpu clock counter */
 	uint64_t (*get_gpu_clock_counter)(struct amdgpu_device *adev);
 	void (*select_se_sh)(struct amdgpu_device *adev, u32 se_num, u32 sh_num, u32 instance);
+	void (*read_wave_data)(struct amdgpu_device *adev, uint32_t simd, uint32_t wave, uint32_t *dst, int *no_fields);
 };
 
 struct amdgpu_gfx {
@@ -1188,16 +896,6 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
 int amdgpu_ib_pool_init(struct amdgpu_device *adev);
 void amdgpu_ib_pool_fini(struct amdgpu_device *adev);
 int amdgpu_ib_ring_tests(struct amdgpu_device *adev);
-int amdgpu_ring_alloc(struct amdgpu_ring *ring, unsigned ndw);
-void amdgpu_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count);
-void amdgpu_ring_generic_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib);
-void amdgpu_ring_commit(struct amdgpu_ring *ring);
-void amdgpu_ring_undo(struct amdgpu_ring *ring);
-int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
-		     unsigned ring_size, u32 nop, u32 align_mask,
-		     struct amdgpu_irq_src *irq_src, unsigned irq_type,
-		     enum amdgpu_ring_type ring_type);
-void amdgpu_ring_fini(struct amdgpu_ring *ring);
 
 /*
  * CS.
@@ -1294,354 +992,6 @@ struct amdgpu_wb {
 int amdgpu_wb_get(struct amdgpu_device *adev, u32 *wb);
 void amdgpu_wb_free(struct amdgpu_device *adev, u32 wb);
 
-
-
-enum amdgpu_int_thermal_type {
-	THERMAL_TYPE_NONE,
-	THERMAL_TYPE_EXTERNAL,
-	THERMAL_TYPE_EXTERNAL_GPIO,
-	THERMAL_TYPE_RV6XX,
-	THERMAL_TYPE_RV770,
-	THERMAL_TYPE_ADT7473_WITH_INTERNAL,
-	THERMAL_TYPE_EVERGREEN,
-	THERMAL_TYPE_SUMO,
-	THERMAL_TYPE_NI,
-	THERMAL_TYPE_SI,
-	THERMAL_TYPE_EMC2103_WITH_INTERNAL,
-	THERMAL_TYPE_CI,
-	THERMAL_TYPE_KV,
-};
-
-enum amdgpu_dpm_auto_throttle_src {
-	AMDGPU_DPM_AUTO_THROTTLE_SRC_THERMAL,
-	AMDGPU_DPM_AUTO_THROTTLE_SRC_EXTERNAL
-};
-
-enum amdgpu_dpm_event_src {
-	AMDGPU_DPM_EVENT_SRC_ANALOG = 0,
-	AMDGPU_DPM_EVENT_SRC_EXTERNAL = 1,
-	AMDGPU_DPM_EVENT_SRC_DIGITAL = 2,
-	AMDGPU_DPM_EVENT_SRC_ANALOG_OR_EXTERNAL = 3,
-	AMDGPU_DPM_EVENT_SRC_DIGIAL_OR_EXTERNAL = 4
-};
-
-#define AMDGPU_MAX_VCE_LEVELS 6
-
-enum amdgpu_vce_level {
-	AMDGPU_VCE_LEVEL_AC_ALL = 0,     /* AC, All cases */
-	AMDGPU_VCE_LEVEL_DC_EE = 1,      /* DC, entropy encoding */
-	AMDGPU_VCE_LEVEL_DC_LL_LOW = 2,  /* DC, low latency queue, res <= 720 */
-	AMDGPU_VCE_LEVEL_DC_LL_HIGH = 3, /* DC, low latency queue, 1080 >= res > 720 */
-	AMDGPU_VCE_LEVEL_DC_GP_LOW = 4,  /* DC, general purpose queue, res <= 720 */
-	AMDGPU_VCE_LEVEL_DC_GP_HIGH = 5, /* DC, general purpose queue, 1080 >= res > 720 */
-};
-
-struct amdgpu_ps {
-	u32 caps; /* vbios flags */
-	u32 class; /* vbios flags */
-	u32 class2; /* vbios flags */
-	/* UVD clocks */
-	u32 vclk;
-	u32 dclk;
-	/* VCE clocks */
-	u32 evclk;
-	u32 ecclk;
-	bool vce_active;
-	enum amdgpu_vce_level vce_level;
-	/* asic priv */
-	void *ps_priv;
-};
-
-struct amdgpu_dpm_thermal {
-	/* thermal interrupt work */
-	struct work_struct work;
-	/* low temperature threshold */
-	int min_temp;
-	/* high temperature threshold */
-	int max_temp;
-	/* was last interrupt low to high or high to low */
-	bool high_to_low;
-	/* interrupt source */
-	struct amdgpu_irq_src irq;
-};
-
-enum amdgpu_clk_action
-{
-	AMDGPU_SCLK_UP = 1,
-	AMDGPU_SCLK_DOWN
-};
-
-struct amdgpu_blacklist_clocks
-{
-	u32 sclk;
-	u32 mclk;
-	enum amdgpu_clk_action action;
-};
-
-struct amdgpu_clock_and_voltage_limits {
-	u32 sclk;
-	u32 mclk;
-	u16 vddc;
-	u16 vddci;
-};
-
-struct amdgpu_clock_array {
-	u32 count;
-	u32 *values;
-};
-
-struct amdgpu_clock_voltage_dependency_entry {
-	u32 clk;
-	u16 v;
-};
-
-struct amdgpu_clock_voltage_dependency_table {
-	u32 count;
-	struct amdgpu_clock_voltage_dependency_entry *entries;
-};
-
-union amdgpu_cac_leakage_entry {
-	struct {
-		u16 vddc;
-		u32 leakage;
-	};
-	struct {
-		u16 vddc1;
-		u16 vddc2;
-		u16 vddc3;
-	};
-};
-
-struct amdgpu_cac_leakage_table {
-	u32 count;
-	union amdgpu_cac_leakage_entry *entries;
-};
-
-struct amdgpu_phase_shedding_limits_entry {
-	u16 voltage;
-	u32 sclk;
-	u32 mclk;
-};
-
-struct amdgpu_phase_shedding_limits_table {
-	u32 count;
-	struct amdgpu_phase_shedding_limits_entry *entries;
-};
-
-struct amdgpu_uvd_clock_voltage_dependency_entry {
-	u32 vclk;
-	u32 dclk;
-	u16 v;
-};
-
-struct amdgpu_uvd_clock_voltage_dependency_table {
-	u8 count;
-	struct amdgpu_uvd_clock_voltage_dependency_entry *entries;
-};
-
-struct amdgpu_vce_clock_voltage_dependency_entry {
-	u32 ecclk;
-	u32 evclk;
-	u16 v;
-};
-
-struct amdgpu_vce_clock_voltage_dependency_table {
-	u8 count;
-	struct amdgpu_vce_clock_voltage_dependency_entry *entries;
-};
-
-struct amdgpu_ppm_table {
-	u8 ppm_design;
-	u16 cpu_core_number;
-	u32 platform_tdp;
-	u32 small_ac_platform_tdp;
-	u32 platform_tdc;
-	u32 small_ac_platform_tdc;
-	u32 apu_tdp;
-	u32 dgpu_tdp;
-	u32 dgpu_ulv_power;
-	u32 tj_max;
-};
-
-struct amdgpu_cac_tdp_table {
-	u16 tdp;
-	u16 configurable_tdp;
-	u16 tdc;
-	u16 battery_power_limit;
-	u16 small_power_limit;
-	u16 low_cac_leakage;
-	u16 high_cac_leakage;
-	u16 maximum_power_delivery_limit;
-};
-
-struct amdgpu_dpm_dynamic_state {
-	struct amdgpu_clock_voltage_dependency_table vddc_dependency_on_sclk;
-	struct amdgpu_clock_voltage_dependency_table vddci_dependency_on_mclk;
-	struct amdgpu_clock_voltage_dependency_table vddc_dependency_on_mclk;
-	struct amdgpu_clock_voltage_dependency_table mvdd_dependency_on_mclk;
-	struct amdgpu_clock_voltage_dependency_table vddc_dependency_on_dispclk;
-	struct amdgpu_uvd_clock_voltage_dependency_table uvd_clock_voltage_dependency_table;
-	struct amdgpu_vce_clock_voltage_dependency_table vce_clock_voltage_dependency_table;
-	struct amdgpu_clock_voltage_dependency_table samu_clock_voltage_dependency_table;
-	struct amdgpu_clock_voltage_dependency_table acp_clock_voltage_dependency_table;
-	struct amdgpu_clock_voltage_dependency_table vddgfx_dependency_on_sclk;
-	struct amdgpu_clock_array valid_sclk_values;
-	struct amdgpu_clock_array valid_mclk_values;
-	struct amdgpu_clock_and_voltage_limits max_clock_voltage_on_dc;
-	struct amdgpu_clock_and_voltage_limits max_clock_voltage_on_ac;
-	u32 mclk_sclk_ratio;
-	u32 sclk_mclk_delta;
-	u16 vddc_vddci_delta;
-	u16 min_vddc_for_pcie_gen2;
-	struct amdgpu_cac_leakage_table cac_leakage_table;
-	struct amdgpu_phase_shedding_limits_table phase_shedding_limits_table;
-	struct amdgpu_ppm_table *ppm_table;
-	struct amdgpu_cac_tdp_table *cac_tdp_table;
-};
-
-struct amdgpu_dpm_fan {
-	u16 t_min;
-	u16 t_med;
-	u16 t_high;
-	u16 pwm_min;
-	u16 pwm_med;
-	u16 pwm_high;
-	u8 t_hyst;
-	u32 cycle_delay;
-	u16 t_max;
-	u8 control_mode;
-	u16 default_max_fan_pwm;
-	u16 default_fan_output_sensitivity;
-	u16 fan_output_sensitivity;
-	bool ucode_fan_control;
-};
-
-enum amdgpu_pcie_gen {
-	AMDGPU_PCIE_GEN1 = 0,
-	AMDGPU_PCIE_GEN2 = 1,
-	AMDGPU_PCIE_GEN3 = 2,
-	AMDGPU_PCIE_GEN_INVALID = 0xffff
-};
-
-enum amdgpu_dpm_forced_level {
-	AMDGPU_DPM_FORCED_LEVEL_AUTO = 0,
-	AMDGPU_DPM_FORCED_LEVEL_LOW = 1,
-	AMDGPU_DPM_FORCED_LEVEL_HIGH = 2,
-	AMDGPU_DPM_FORCED_LEVEL_MANUAL = 3,
-};
-
-struct amdgpu_vce_state {
-	/* vce clocks */
-	u32 evclk;
-	u32 ecclk;
-	/* gpu clocks */
-	u32 sclk;
-	u32 mclk;
-	u8 clk_idx;
-	u8 pstate;
-};
-
-struct amdgpu_dpm_funcs {
-	int (*get_temperature)(struct amdgpu_device *adev);
-	int (*pre_set_power_state)(struct amdgpu_device *adev);
-	int (*set_power_state)(struct amdgpu_device *adev);
-	void (*post_set_power_state)(struct amdgpu_device *adev);
-	void (*display_configuration_changed)(struct amdgpu_device *adev);
-	u32 (*get_sclk)(struct amdgpu_device *adev, bool low);
-	u32 (*get_mclk)(struct amdgpu_device *adev, bool low);
-	void (*print_power_state)(struct amdgpu_device *adev, struct amdgpu_ps *ps);
-	void (*debugfs_print_current_performance_level)(struct amdgpu_device *adev, struct seq_file *m);
-	int (*force_performance_level)(struct amdgpu_device *adev, enum amdgpu_dpm_forced_level level);
-	bool (*vblank_too_short)(struct amdgpu_device *adev);
-	void (*powergate_uvd)(struct amdgpu_device *adev, bool gate);
-	void (*powergate_vce)(struct amdgpu_device *adev, bool gate);
-	void (*enable_bapm)(struct amdgpu_device *adev, bool enable);
-	void (*set_fan_control_mode)(struct amdgpu_device *adev, u32 mode);
-	u32 (*get_fan_control_mode)(struct amdgpu_device *adev);
-	int (*set_fan_speed_percent)(struct amdgpu_device *adev, u32 speed);
-	int (*get_fan_speed_percent)(struct amdgpu_device *adev, u32 *speed);
-	int (*force_clock_level)(struct amdgpu_device *adev, enum pp_clock_type type, uint32_t mask);
-	int (*print_clock_levels)(struct amdgpu_device *adev, enum pp_clock_type type, char *buf);
-	int (*get_sclk_od)(struct amdgpu_device *adev);
-	int (*set_sclk_od)(struct amdgpu_device *adev, uint32_t value);
-	int (*get_mclk_od)(struct amdgpu_device *adev);
-	int (*set_mclk_od)(struct amdgpu_device *adev, uint32_t value);
-};
-
-struct amdgpu_dpm {
-	struct amdgpu_ps *ps;
-	/* number of valid power states */
-	int num_ps;
-	/* current power state that is active */
-	struct amdgpu_ps *current_ps;
-	/* requested power state */
-	struct amdgpu_ps *requested_ps;
-	/* boot up power state */
-	struct amdgpu_ps *boot_ps;
-	/* default uvd power state */
-	struct amdgpu_ps *uvd_ps;
-	/* vce requirements */
-	struct amdgpu_vce_state vce_states[AMDGPU_MAX_VCE_LEVELS];
-	enum amdgpu_vce_level vce_level;
-	enum amd_pm_state_type state;
-	enum amd_pm_state_type user_state;
-	u32 platform_caps;
-	u32 voltage_response_time;
-	u32 backbias_response_time;
-	void *priv;
-	u32 new_active_crtcs;
-	int new_active_crtc_count;
-	u32 current_active_crtcs;
-	int current_active_crtc_count;
-	struct amdgpu_dpm_dynamic_state dyn_state;
-	struct amdgpu_dpm_fan fan;
-	u32 tdp_limit;
-	u32 near_tdp_limit;
-	u32 near_tdp_limit_adjusted;
-	u32 sq_ramping_threshold;
-	u32 cac_leakage;
-	u16 tdp_od_limit;
-	u32 tdp_adjustment;
-	u16 load_line_slope;
-	bool power_control;
-	bool ac_power;
-	/* special states active */
-	bool thermal_active;
-	bool uvd_active;
-	bool vce_active;
-	/* thermal handling */
-	struct amdgpu_dpm_thermal thermal;
-	/* forced levels */
-	enum amdgpu_dpm_forced_level forced_level;
-};
-
-struct amdgpu_pm {
-	struct mutex mutex;
-	u32 current_sclk;
-	u32 current_mclk;
-	u32 default_sclk;
-	u32 default_mclk;
-	struct amdgpu_i2c_chan *i2c_bus;
-	/* internal thermal controller on rv6xx+ */
-	enum amdgpu_int_thermal_type int_thermal_type;
-	struct device *int_hwmon_dev;
-	/* fan control parameters */
-	bool no_fan;
-	u8 fan_pulses_per_revolution;
-	u8 fan_min_rpm;
-	u8 fan_max_rpm;
-	/* dpm */
-	bool dpm_enabled;
-	bool sysfs_initialized;
-	struct amdgpu_dpm dpm;
-	const struct firmware *fw; /* SMC firmware */
-	uint32_t fw_version;
-	const struct amdgpu_dpm_funcs *funcs;
-	uint32_t pcie_gen_mask;
-	uint32_t pcie_mlw_mask;
-	struct amd_pp_display_configuration pm_display_cfg;/* set by DAL */
-};
-
 void amdgpu_get_pcie_info(struct amdgpu_device *adev);
 
 /*
@@ -1939,14 +1289,6 @@ typedef void (*amdgpu_wreg_t)(struct amdgpu_device*, uint32_t, uint32_t);
 typedef uint32_t (*amdgpu_block_rreg_t)(struct amdgpu_device*, uint32_t, uint32_t);
 typedef void (*amdgpu_block_wreg_t)(struct amdgpu_device*, uint32_t, uint32_t, uint32_t);
 
-struct amdgpu_ip_block_status {
-	bool valid;
-	bool sw;
-	bool hw;
-	bool late_initialized;
-	bool hang;
-};
-
 struct amdgpu_device {
 	struct device *dev;
 	struct drm_device *ddev;
@@ -2102,9 +1444,8 @@ struct amdgpu_device {
 	/* GDS */
 	struct amdgpu_gds gds;
 
-	const struct amdgpu_ip_block_version *ip_blocks;
+	struct amdgpu_ip_block ip_blocks[AMDGPU_MAX_IP_NUM];
 	int num_ip_blocks;
-	struct amdgpu_ip_block_status *ip_block_status;
 	struct mutex mn_lock;
 	DECLARE_HASHTABLE(mn_hash, 7);
 
@@ -2127,6 +1468,11 @@ struct amdgpu_device {
 
 };
 
+static inline struct amdgpu_device *amdgpu_ttm_adev(struct ttm_bo_device *bdev)
+{
+	return container_of(bdev, struct amdgpu_device, mman.bdev);
+}
+
 bool amdgpu_device_is_px(struct drm_device *dev);
 int amdgpu_device_init(struct amdgpu_device *adev,
 		       struct drm_device *ddev,
@@ -2278,8 +1624,6 @@ amdgpu_get_sdma_instance(struct amdgpu_ring *ring)
 #define amdgpu_ring_pad_ib(r, ib) ((r)->funcs->pad_ib((r), (ib)))
 #define amdgpu_ring_init_cond_exec(r) (r)->funcs->init_cond_exec((r))
 #define amdgpu_ring_patch_cond_exec(r,o) (r)->funcs->patch_cond_exec((r),(o))
-#define amdgpu_ring_get_emit_ib_size(r) (r)->funcs->get_emit_ib_size((r))
-#define amdgpu_ring_get_dma_frame_size(r) (r)->funcs->get_dma_frame_size((r))
 #define amdgpu_ih_get_wptr(adev) (adev)->irq.ih_funcs->get_wptr((adev))
 #define amdgpu_ih_decode_iv(adev, iv) (adev)->irq.ih_funcs->decode_iv((adev), (iv))
 #define amdgpu_ih_set_rptr(adev) (adev)->irq.ih_funcs->set_rptr((adev))
@@ -2301,108 +1645,8 @@ amdgpu_get_sdma_instance(struct amdgpu_ring *ring)
 #define amdgpu_display_resume_mc_access(adev, s) (adev)->mode_info.funcs->resume_mc_access((adev), (s))
 #define amdgpu_emit_copy_buffer(adev, ib, s, d, b) (adev)->mman.buffer_funcs->emit_copy_buffer((ib), (s), (d), (b))
 #define amdgpu_emit_fill_buffer(adev, ib, s, d, b) (adev)->mman.buffer_funcs->emit_fill_buffer((ib), (s), (d), (b))
-#define amdgpu_dpm_pre_set_power_state(adev) (adev)->pm.funcs->pre_set_power_state((adev))
-#define amdgpu_dpm_set_power_state(adev) (adev)->pm.funcs->set_power_state((adev))
-#define amdgpu_dpm_post_set_power_state(adev) (adev)->pm.funcs->post_set_power_state((adev))
-#define amdgpu_dpm_display_configuration_changed(adev) (adev)->pm.funcs->display_configuration_changed((adev))
-#define amdgpu_dpm_print_power_state(adev, ps) (adev)->pm.funcs->print_power_state((adev), (ps))
-#define amdgpu_dpm_vblank_too_short(adev) (adev)->pm.funcs->vblank_too_short((adev))
-#define amdgpu_dpm_enable_bapm(adev, e) (adev)->pm.funcs->enable_bapm((adev), (e))
 #define amdgpu_gfx_get_gpu_clock_counter(adev) (adev)->gfx.funcs->get_gpu_clock_counter((adev))
 #define amdgpu_gfx_select_se_sh(adev, se, sh, instance) (adev)->gfx.funcs->select_se_sh((adev), (se), (sh), (instance))
-
-#define amdgpu_dpm_read_sensor(adev, idx, value) \
-	((adev)->pp_enabled ? \
-		(adev)->powerplay.pp_funcs->read_sensor(adev->powerplay.pp_handle, (idx), (value)) : \
-		-EINVAL)
-
-#define amdgpu_dpm_get_temperature(adev) \
-	((adev)->pp_enabled ? \
-		(adev)->powerplay.pp_funcs->get_temperature((adev)->powerplay.pp_handle) : \
-		(adev)->pm.funcs->get_temperature((adev)))
-
-#define amdgpu_dpm_set_fan_control_mode(adev, m) \
-	((adev)->pp_enabled ? \
-		(adev)->powerplay.pp_funcs->set_fan_control_mode((adev)->powerplay.pp_handle, (m)) : \
-		(adev)->pm.funcs->set_fan_control_mode((adev), (m)))
-
-#define amdgpu_dpm_get_fan_control_mode(adev) \
-	((adev)->pp_enabled ? \
-		(adev)->powerplay.pp_funcs->get_fan_control_mode((adev)->powerplay.pp_handle) : \
-		(adev)->pm.funcs->get_fan_control_mode((adev)))
-
-#define amdgpu_dpm_set_fan_speed_percent(adev, s) \
-	((adev)->pp_enabled ? \
-		(adev)->powerplay.pp_funcs->set_fan_speed_percent((adev)->powerplay.pp_handle, (s)) : \
-		(adev)->pm.funcs->set_fan_speed_percent((adev), (s)))
-
-#define amdgpu_dpm_get_fan_speed_percent(adev, s) \
-	((adev)->pp_enabled ? \
-		(adev)->powerplay.pp_funcs->get_fan_speed_percent((adev)->powerplay.pp_handle, (s)) : \
-		(adev)->pm.funcs->get_fan_speed_percent((adev), (s)))
-
-#define amdgpu_dpm_get_sclk(adev, l) \
-	((adev)->pp_enabled ? \
-		(adev)->powerplay.pp_funcs->get_sclk((adev)->powerplay.pp_handle, (l)) : \
-		(adev)->pm.funcs->get_sclk((adev), (l)))
-
-#define amdgpu_dpm_get_mclk(adev, l) \
-	((adev)->pp_enabled ? \
-		(adev)->powerplay.pp_funcs->get_mclk((adev)->powerplay.pp_handle, (l)) : \
-		(adev)->pm.funcs->get_mclk((adev), (l)))
-
-
-#define amdgpu_dpm_force_performance_level(adev, l) \
-	((adev)->pp_enabled ? \
-		(adev)->powerplay.pp_funcs->force_performance_level((adev)->powerplay.pp_handle, (l)) : \
-		(adev)->pm.funcs->force_performance_level((adev), (l)))
-
-#define amdgpu_dpm_powergate_uvd(adev, g) \
-	((adev)->pp_enabled ? \
-		(adev)->powerplay.pp_funcs->powergate_uvd((adev)->powerplay.pp_handle, (g)) : \
-		(adev)->pm.funcs->powergate_uvd((adev), (g)))
-
-#define amdgpu_dpm_powergate_vce(adev, g) \
-	((adev)->pp_enabled ? \
-		(adev)->powerplay.pp_funcs->powergate_vce((adev)->powerplay.pp_handle, (g)) : \
-		(adev)->pm.funcs->powergate_vce((adev), (g)))
-
-#define amdgpu_dpm_get_current_power_state(adev) \
-	(adev)->powerplay.pp_funcs->get_current_power_state((adev)->powerplay.pp_handle)
-
-#define amdgpu_dpm_get_performance_level(adev) \
-	(adev)->powerplay.pp_funcs->get_performance_level((adev)->powerplay.pp_handle)
-
-#define amdgpu_dpm_get_pp_num_states(adev, data) \
-	(adev)->powerplay.pp_funcs->get_pp_num_states((adev)->powerplay.pp_handle, data)
-
-#define amdgpu_dpm_get_pp_table(adev, table) \
-	(adev)->powerplay.pp_funcs->get_pp_table((adev)->powerplay.pp_handle, table)
-
-#define amdgpu_dpm_set_pp_table(adev, buf, size) \
-	(adev)->powerplay.pp_funcs->set_pp_table((adev)->powerplay.pp_handle, buf, size)
-
-#define amdgpu_dpm_print_clock_levels(adev, type, buf) \
-	(adev)->powerplay.pp_funcs->print_clock_levels((adev)->powerplay.pp_handle, type, buf)
-
-#define amdgpu_dpm_force_clock_level(adev, type, level) \
-	(adev)->powerplay.pp_funcs->force_clock_level((adev)->powerplay.pp_handle, type, level)
-
-#define amdgpu_dpm_get_sclk_od(adev) \
-	(adev)->powerplay.pp_funcs->get_sclk_od((adev)->powerplay.pp_handle)
-
-#define amdgpu_dpm_set_sclk_od(adev, value) \
-	(adev)->powerplay.pp_funcs->set_sclk_od((adev)->powerplay.pp_handle, value)
-
-#define amdgpu_dpm_get_mclk_od(adev) \
-	((adev)->powerplay.pp_funcs->get_mclk_od((adev)->powerplay.pp_handle))
-
-#define amdgpu_dpm_set_mclk_od(adev, value) \
-	((adev)->powerplay.pp_funcs->set_mclk_od((adev)->powerplay.pp_handle, value))
-
-#define amdgpu_dpm_dispatch_task(adev, event_id, input, output) \
-	(adev)->powerplay.pp_funcs->dispatch_tasks((adev)->powerplay.pp_handle, (event_id), (input), (output))
-
 #define amdgpu_gds_switch(adev, r, v, d, w, a) (adev)->gds.funcs->patch_gds_switch((r), (v), (d), (w), (a))
 
 /* Common functions */
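
Context for the amdgpu.h hunks above: the parallel ip_blocks pointer and
ip_block_status array are folded into one fixed-size array of struct
amdgpu_ip_block, and ASIC code now registers blocks through the new
amdgpu_ip_block_add() helper. A minimal sketch of the intended usage,
assuming a hypothetical xyz_common_ip_block (the name is illustrative
and not part of this diff):

	/* Register each IP block during early init; the helper appends
	 * to adev->ip_blocks[] and bumps num_ip_blocks. */
	r = amdgpu_ip_block_add(adev, &xyz_common_ip_block);
	if (r)
		return r;

	/* Common code then walks a single array; per-block state lives
	 * in .status right next to the const .version. */
	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		DRM_INFO("IP %s v%u.%u.%u\n",
			 adev->ip_blocks[i].version->funcs->name,
			 adev->ip_blocks[i].version->major,
			 adev->ip_blocks[i].version->minor,
			 adev->ip_blocks[i].version->rev);
	}
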
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c
index 892d60fb225b..2f9f96cc9f65 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c
@@ -265,14 +265,14 @@ static int acp_hw_init(void *handle)
 
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
-	const struct amdgpu_ip_block_version *ip_version =
+	const struct amdgpu_ip_block *ip_block =
 		amdgpu_get_ip_block(adev, AMD_IP_BLOCK_TYPE_ACP);
 
-	if (!ip_version)
+	if (!ip_block)
 		return -EINVAL;
 
 	r = amd_acp_hw_init(adev->acp.cgs_device,
-			    ip_version->major, ip_version->minor);
+			    ip_block->version->major, ip_block->version->minor);
 	/* -ENODEV means board uses AZ rather than ACP */
 	if (r == -ENODEV)
 		return 0;
@@ -456,7 +456,7 @@ static int acp_set_powergating_state(void *handle,
 	return 0;
 }
 
-const struct amd_ip_funcs acp_ip_funcs = {
+static const struct amd_ip_funcs acp_ip_funcs = {
 	.name = "acp_ip",
 	.early_init = acp_early_init,
 	.late_init = NULL,
@@ -472,3 +472,12 @@ const struct amd_ip_funcs acp_ip_funcs = {
 	.set_clockgating_state = acp_set_clockgating_state,
 	.set_powergating_state = acp_set_powergating_state,
 };
+
+const struct amdgpu_ip_block_version acp_ip_block =
+{
+	.type = AMD_IP_BLOCK_TYPE_ACP,
+	.major = 2,
+	.minor = 2,
+	.rev = 0,
+	.funcs = &acp_ip_funcs,
+};
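
With acp_ip_funcs now static to this file, the exported handle for the
ACP IP becomes the acp_ip_block version struct added above. A sketch of
how the ASIC setup path would consume it (the surrounding early-init
function is assumed, not shown in this diff):

	/* The ASIC file adds the versioned block instead of naming
	 * acp_ip_funcs in its own amdgpu_ip_block_version table. */
	r = amdgpu_ip_block_add(adev, &acp_ip_block);
	if (r)
		return r;
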
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.h
index 8a396313c86f..a288ce25c176 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.h
@@ -37,6 +37,6 @@ struct amdgpu_acp {
 	struct acp_pm_domain *acp_genpd;
 };
 
-extern const struct amd_ip_funcs acp_ip_funcs;
+extern const struct amdgpu_ip_block_version acp_ip_block;
 
 #endif /* __AMDGPU_ACP_H__ */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
index 8e6bf548d689..56a86dd5789e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
@@ -1115,49 +1115,6 @@ int amdgpu_atombios_get_memory_pll_dividers(struct amdgpu_device *adev,
 	return 0;
 }
 
-uint32_t amdgpu_atombios_get_engine_clock(struct amdgpu_device *adev)
-{
-	GET_ENGINE_CLOCK_PS_ALLOCATION args;
-	int index = GetIndexIntoMasterTable(COMMAND, GetEngineClock);
-
-	amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args);
-	return le32_to_cpu(args.ulReturnEngineClock);
-}
-
-uint32_t amdgpu_atombios_get_memory_clock(struct amdgpu_device *adev)
-{
-	GET_MEMORY_CLOCK_PS_ALLOCATION args;
-	int index = GetIndexIntoMasterTable(COMMAND, GetMemoryClock);
-
-	amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args);
-	return le32_to_cpu(args.ulReturnMemoryClock);
-}
-
-void amdgpu_atombios_set_engine_clock(struct amdgpu_device *adev,
-				      uint32_t eng_clock)
-{
-	SET_ENGINE_CLOCK_PS_ALLOCATION args;
-	int index = GetIndexIntoMasterTable(COMMAND, SetEngineClock);
-
-	args.ulTargetEngineClock = cpu_to_le32(eng_clock); /* 10 khz */
-
-	amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args);
-}
-
-void amdgpu_atombios_set_memory_clock(struct amdgpu_device *adev,
-				      uint32_t mem_clock)
-{
-	SET_MEMORY_CLOCK_PS_ALLOCATION args;
-	int index = GetIndexIntoMasterTable(COMMAND, SetMemoryClock);
-
-	if (adev->flags & AMD_IS_APU)
-		return;
-
-	args.ulTargetMemoryClock = cpu_to_le32(mem_clock); /* 10 khz */
-
-	amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args);
-}
-
 void amdgpu_atombios_set_engine_dram_timings(struct amdgpu_device *adev,
 					     u32 eng_clock, u32 mem_clock)
 {
@@ -1256,45 +1213,6 @@ int amdgpu_atombios_get_leakage_vddc_based_on_leakage_idx(struct amdgpu_device *
 	return amdgpu_atombios_get_max_vddc(adev, VOLTAGE_TYPE_VDDC, leakage_idx, voltage);
 }
 
-void amdgpu_atombios_set_voltage(struct amdgpu_device *adev,
-				 u16 voltage_level,
-				 u8 voltage_type)
-{
-	union set_voltage args;
-	int index = GetIndexIntoMasterTable(COMMAND, SetVoltage);
-	u8 frev, crev, volt_index = voltage_level;
-
-	if (!amdgpu_atom_parse_cmd_header(adev->mode_info.atom_context, index, &frev, &crev))
-		return;
-
-	/* 0xff01 is a flag rather then an actual voltage */
-	if (voltage_level == 0xff01)
-		return;
-
-	switch (crev) {
-	case 1:
-		args.v1.ucVoltageType = voltage_type;
-		args.v1.ucVoltageMode = SET_ASIC_VOLTAGE_MODE_ALL_SOURCE;
-		args.v1.ucVoltageIndex = volt_index;
-		break;
-	case 2:
-		args.v2.ucVoltageType = voltage_type;
-		args.v2.ucVoltageMode = SET_ASIC_VOLTAGE_MODE_SET_VOLTAGE;
-		args.v2.usVoltageLevel = cpu_to_le16(voltage_level);
-		break;
-	case 3:
-		args.v3.ucVoltageType = voltage_type;
-		args.v3.ucVoltageMode = ATOM_SET_VOLTAGE;
-		args.v3.usVoltageLevel = cpu_to_le16(voltage_level);
-		break;
-	default:
-		DRM_ERROR("Unknown table version %d, %d\n", frev, crev);
-		return;
-	}
-
-	amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args);
-}
-
 int amdgpu_atombios_get_leakage_id_from_vbios(struct amdgpu_device *adev,
 					      u16 *leakage_id)
 {
@@ -1784,6 +1702,19 @@ void amdgpu_atombios_scratch_regs_restore(struct amdgpu_device *adev) | |||
1784 | WREG32(mmBIOS_SCRATCH_0 + i, adev->bios_scratch[i]); | 1702 | WREG32(mmBIOS_SCRATCH_0 + i, adev->bios_scratch[i]); |
1785 | } | 1703 | } |
1786 | 1704 | ||
1705 | void amdgpu_atombios_scratch_regs_engine_hung(struct amdgpu_device *adev, | ||
1706 | bool hung) | ||
1707 | { | ||
1708 | u32 tmp = RREG32(mmBIOS_SCRATCH_3); | ||
1709 | |||
1710 | if (hung) | ||
1711 | tmp |= ATOM_S3_ASIC_GUI_ENGINE_HUNG; | ||
1712 | else | ||
1713 | tmp &= ~ATOM_S3_ASIC_GUI_ENGINE_HUNG; | ||
1714 | |||
1715 | WREG32(mmBIOS_SCRATCH_3, tmp); | ||
1716 | } | ||
1717 | |||
1787 | /* Atom needs data in little endian format | 1718 | /* Atom needs data in little endian format |
1788 | * so swap as appropriate when copying data to | 1719 | * so swap as appropriate when copying data to |
1789 | * or from atom. Note that atom operates on | 1720 | * or from atom. Note that atom operates on |
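For context on the new amdgpu_atombios_scratch_regs_engine_hung() helper above: it is a plain read-modify-write of BIOS scratch register 3, and a GPU reset path would typically bracket the actual reset with it. A minimal sketch of such a caller; the reset function named here is hypothetical and not part of this patch:

	/* Hedged sketch: one plausible call site for the new helper; the
	 * reset function below is an assumption, not from this patch. */
	static int example_asic_reset(struct amdgpu_device *adev)
	{
		int r;

		/* flag the GUI engine as hung in the vbios scratch regs */
		amdgpu_atombios_scratch_regs_engine_hung(adev, true);

		r = example_pci_config_reset(adev);	/* hypothetical reset */

		/* clear the flag once the reset attempt completes */
		amdgpu_atombios_scratch_regs_engine_hung(adev, false);

		return r;
	}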
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.h index 17356151db38..70e9acef5d9c 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.h | |||
@@ -163,16 +163,6 @@ int amdgpu_atombios_get_memory_pll_dividers(struct amdgpu_device *adev, | |||
163 | bool strobe_mode, | 163 | bool strobe_mode, |
164 | struct atom_mpll_param *mpll_param); | 164 | struct atom_mpll_param *mpll_param); |
165 | 165 | ||
166 | uint32_t amdgpu_atombios_get_engine_clock(struct amdgpu_device *adev); | ||
167 | uint32_t amdgpu_atombios_get_memory_clock(struct amdgpu_device *adev); | ||
168 | void amdgpu_atombios_set_engine_clock(struct amdgpu_device *adev, | ||
169 | uint32_t eng_clock); | ||
170 | void amdgpu_atombios_set_memory_clock(struct amdgpu_device *adev, | ||
171 | uint32_t mem_clock); | ||
172 | void amdgpu_atombios_set_voltage(struct amdgpu_device *adev, | ||
173 | u16 voltage_level, | ||
174 | u8 voltage_type); | ||
175 | |||
176 | void amdgpu_atombios_set_engine_dram_timings(struct amdgpu_device *adev, | 166 | void amdgpu_atombios_set_engine_dram_timings(struct amdgpu_device *adev, |
177 | u32 eng_clock, u32 mem_clock); | 167 | u32 eng_clock, u32 mem_clock); |
178 | 168 | ||
@@ -206,6 +196,8 @@ void amdgpu_atombios_scratch_regs_lock(struct amdgpu_device *adev, bool lock); | |||
206 | void amdgpu_atombios_scratch_regs_init(struct amdgpu_device *adev); | 196 | void amdgpu_atombios_scratch_regs_init(struct amdgpu_device *adev); |
207 | void amdgpu_atombios_scratch_regs_save(struct amdgpu_device *adev); | 197 | void amdgpu_atombios_scratch_regs_save(struct amdgpu_device *adev); |
208 | void amdgpu_atombios_scratch_regs_restore(struct amdgpu_device *adev); | 198 | void amdgpu_atombios_scratch_regs_restore(struct amdgpu_device *adev); |
199 | void amdgpu_atombios_scratch_regs_engine_hung(struct amdgpu_device *adev, | ||
200 | bool hung); | ||
209 | 201 | ||
210 | void amdgpu_atombios_copy_swap(u8 *dst, u8 *src, u8 num_bytes, bool to_le); | 202 | void amdgpu_atombios_copy_swap(u8 *dst, u8 *src, u8 num_bytes, bool to_le); |
211 | int amdgpu_atombios_get_max_vddc(struct amdgpu_device *adev, u8 voltage_type, | 203 | int amdgpu_atombios_get_max_vddc(struct amdgpu_device *adev, u8 voltage_type, |
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c index 7a8bfa34682f..017556ca22e6 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c | |||
@@ -146,7 +146,8 @@ static int amdgpu_cgs_alloc_gpu_mem(struct cgs_device *cgs_device, | |||
146 | switch(type) { | 146 | switch(type) { |
147 | case CGS_GPU_MEM_TYPE__VISIBLE_CONTIG_FB: | 147 | case CGS_GPU_MEM_TYPE__VISIBLE_CONTIG_FB: |
148 | case CGS_GPU_MEM_TYPE__VISIBLE_FB: | 148 | case CGS_GPU_MEM_TYPE__VISIBLE_FB: |
149 | flags = AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED; | 149 | flags = AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED | |
150 | AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS; | ||
150 | domain = AMDGPU_GEM_DOMAIN_VRAM; | 151 | domain = AMDGPU_GEM_DOMAIN_VRAM; |
151 | if (max_offset > adev->mc.real_vram_size) | 152 | if (max_offset > adev->mc.real_vram_size) |
152 | return -EINVAL; | 153 | return -EINVAL; |
@@ -157,7 +158,8 @@ static int amdgpu_cgs_alloc_gpu_mem(struct cgs_device *cgs_device, | |||
157 | break; | 158 | break; |
158 | case CGS_GPU_MEM_TYPE__INVISIBLE_CONTIG_FB: | 159 | case CGS_GPU_MEM_TYPE__INVISIBLE_CONTIG_FB: |
159 | case CGS_GPU_MEM_TYPE__INVISIBLE_FB: | 160 | case CGS_GPU_MEM_TYPE__INVISIBLE_FB: |
160 | flags = AMDGPU_GEM_CREATE_NO_CPU_ACCESS; | 161 | flags = AMDGPU_GEM_CREATE_NO_CPU_ACCESS | |
162 | AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS; | ||
161 | domain = AMDGPU_GEM_DOMAIN_VRAM; | 163 | domain = AMDGPU_GEM_DOMAIN_VRAM; |
162 | if (adev->mc.visible_vram_size < adev->mc.real_vram_size) { | 164 | if (adev->mc.visible_vram_size < adev->mc.real_vram_size) { |
163 | place.fpfn = | 165 | place.fpfn = |
@@ -240,7 +242,7 @@ static int amdgpu_cgs_gmap_gpu_mem(struct cgs_device *cgs_device, cgs_handle_t h | |||
240 | r = amdgpu_bo_reserve(obj, false); | 242 | r = amdgpu_bo_reserve(obj, false); |
241 | if (unlikely(r != 0)) | 243 | if (unlikely(r != 0)) |
242 | return r; | 244 | return r; |
243 | r = amdgpu_bo_pin_restricted(obj, AMDGPU_GEM_DOMAIN_GTT, | 245 | r = amdgpu_bo_pin_restricted(obj, obj->prefered_domains, |
244 | min_offset, max_offset, mcaddr); | 246 | min_offset, max_offset, mcaddr); |
245 | amdgpu_bo_unreserve(obj); | 247 | amdgpu_bo_unreserve(obj); |
246 | return r; | 248 | return r; |
@@ -624,11 +626,11 @@ static int amdgpu_cgs_set_clockgating_state(struct cgs_device *cgs_device, | |||
624 | int i, r = -1; | 626 | int i, r = -1; |
625 | 627 | ||
626 | for (i = 0; i < adev->num_ip_blocks; i++) { | 628 | for (i = 0; i < adev->num_ip_blocks; i++) { |
627 | if (!adev->ip_block_status[i].valid) | 629 | if (!adev->ip_blocks[i].status.valid) |
628 | continue; | 630 | continue; |
629 | 631 | ||
630 | if (adev->ip_blocks[i].type == block_type) { | 632 | if (adev->ip_blocks[i].version->type == block_type) { |
631 | r = adev->ip_blocks[i].funcs->set_clockgating_state( | 633 | r = adev->ip_blocks[i].version->funcs->set_clockgating_state( |
632 | (void *)adev, | 634 | (void *)adev, |
633 | state); | 635 | state); |
634 | break; | 636 | break; |
@@ -645,11 +647,11 @@ static int amdgpu_cgs_set_powergating_state(struct cgs_device *cgs_device, | |||
645 | int i, r = -1; | 647 | int i, r = -1; |
646 | 648 | ||
647 | for (i = 0; i < adev->num_ip_blocks; i++) { | 649 | for (i = 0; i < adev->num_ip_blocks; i++) { |
648 | if (!adev->ip_block_status[i].valid) | 650 | if (!adev->ip_blocks[i].status.valid) |
649 | continue; | 651 | continue; |
650 | 652 | ||
651 | if (adev->ip_blocks[i].type == block_type) { | 653 | if (adev->ip_blocks[i].version->type == block_type) { |
652 | r = adev->ip_blocks[i].funcs->set_powergating_state( | 654 | r = adev->ip_blocks[i].version->funcs->set_powergating_state( |
653 | (void *)adev, | 655 | (void *)adev, |
654 | state); | 656 | state); |
655 | break; | 657 | break; |
@@ -685,15 +687,21 @@ static uint32_t fw_type_convert(struct cgs_device *cgs_device, uint32_t fw_type) | |||
685 | result = AMDGPU_UCODE_ID_CP_MEC1; | 687 | result = AMDGPU_UCODE_ID_CP_MEC1; |
686 | break; | 688 | break; |
687 | case CGS_UCODE_ID_CP_MEC_JT2: | 689 | case CGS_UCODE_ID_CP_MEC_JT2: |
688 | if (adev->asic_type == CHIP_TONGA || adev->asic_type == CHIP_POLARIS11 | 690 | /* For VI, JT2 should be the same as JT1, because: |
689 | || adev->asic_type == CHIP_POLARIS10) | 691 | 1. MEC2 and MEC1 use exactly the same FW. |
690 | result = AMDGPU_UCODE_ID_CP_MEC2; | 692 | 2. JT2 is not patched, but JT1 is. |
691 | else | 693 | */ |
694 | if (adev->asic_type >= CHIP_TOPAZ) | ||
692 | result = AMDGPU_UCODE_ID_CP_MEC1; | 695 | result = AMDGPU_UCODE_ID_CP_MEC1; |
696 | else | ||
697 | result = AMDGPU_UCODE_ID_CP_MEC2; | ||
693 | break; | 698 | break; |
694 | case CGS_UCODE_ID_RLC_G: | 699 | case CGS_UCODE_ID_RLC_G: |
695 | result = AMDGPU_UCODE_ID_RLC_G; | 700 | result = AMDGPU_UCODE_ID_RLC_G; |
696 | break; | 701 | break; |
702 | case CGS_UCODE_ID_STORAGE: | ||
703 | result = AMDGPU_UCODE_ID_STORAGE; | ||
704 | break; | ||
697 | default: | 705 | default: |
698 | DRM_ERROR("Firmware type not supported\n"); | 706 | DRM_ERROR("Firmware type not supported\n"); |
699 | } | 707 | } |
@@ -776,12 +784,18 @@ static int amdgpu_cgs_get_firmware_info(struct cgs_device *cgs_device, | |||
776 | 784 | ||
777 | if ((type == CGS_UCODE_ID_CP_MEC_JT1) || | 785 | if ((type == CGS_UCODE_ID_CP_MEC_JT1) || |
778 | (type == CGS_UCODE_ID_CP_MEC_JT2)) { | 786 | (type == CGS_UCODE_ID_CP_MEC_JT2)) { |
779 | gpu_addr += le32_to_cpu(header->jt_offset) << 2; | 787 | gpu_addr += ALIGN(le32_to_cpu(header->header.ucode_size_bytes), PAGE_SIZE); |
780 | data_size = le32_to_cpu(header->jt_size) << 2; | 788 | data_size = le32_to_cpu(header->jt_size) << 2; |
781 | } | 789 | } |
782 | info->mc_addr = gpu_addr; | 790 | |
791 | info->kptr = ucode->kaddr; | ||
783 | info->image_size = data_size; | 792 | info->image_size = data_size; |
793 | info->mc_addr = gpu_addr; | ||
784 | info->version = (uint16_t)le32_to_cpu(header->header.ucode_version); | 794 | info->version = (uint16_t)le32_to_cpu(header->header.ucode_version); |
795 | |||
796 | if (CGS_UCODE_ID_CP_MEC == type) | ||
797 | info->image_size = (header->jt_offset) << 2; | ||
798 | |||
785 | info->fw_version = amdgpu_get_firmware_version(cgs_device, type); | 799 | info->fw_version = amdgpu_get_firmware_version(cgs_device, type); |
786 | info->feature_version = (uint16_t)le32_to_cpu(header->ucode_feature_version); | 800 | info->feature_version = (uint16_t)le32_to_cpu(header->ucode_feature_version); |
787 | } else { | 801 | } else { |
@@ -851,6 +865,12 @@ static int amdgpu_cgs_get_firmware_info(struct cgs_device *cgs_device, | |||
851 | return 0; | 865 | return 0; |
852 | } | 866 | } |
853 | 867 | ||
868 | static int amdgpu_cgs_is_virtualization_enabled(void *cgs_device) | ||
869 | { | ||
870 | CGS_FUNC_ADEV; | ||
871 | return amdgpu_sriov_vf(adev); | ||
872 | } | ||
873 | |||
854 | static int amdgpu_cgs_query_system_info(struct cgs_device *cgs_device, | 874 | static int amdgpu_cgs_query_system_info(struct cgs_device *cgs_device, |
855 | struct cgs_system_info *sys_info) | 875 | struct cgs_system_info *sys_info) |
856 | { | 876 | { |
@@ -1204,6 +1224,7 @@ static const struct cgs_ops amdgpu_cgs_ops = { | |||
1204 | amdgpu_cgs_notify_dpm_enabled, | 1224 | amdgpu_cgs_notify_dpm_enabled, |
1205 | amdgpu_cgs_call_acpi_method, | 1225 | amdgpu_cgs_call_acpi_method, |
1206 | amdgpu_cgs_query_system_info, | 1226 | amdgpu_cgs_query_system_info, |
1227 | amdgpu_cgs_is_virtualization_enabled | ||
1207 | }; | 1228 | }; |
1208 | 1229 | ||
1209 | static const struct cgs_os_ops amdgpu_cgs_os_ops = { | 1230 | static const struct cgs_os_ops amdgpu_cgs_os_ops = { |
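The reworked jump-table address in amdgpu_cgs_get_firmware_info() above assumes the JT is stored page-aligned after the ucode image inside the firmware BO, rather than at jt_offset within the image. A rough sketch of the layout and math, assuming "hdr" points at the gfx firmware header used in the hunk:

	/* Hedged sketch of the MEC firmware BO layout the new JT math assumes:
	 *
	 *   fw_mc_addr + 0                                    ucode image
	 *   fw_mc_addr + ALIGN(ucode_size_bytes, PAGE_SIZE)   jump table (JT)
	 */
	uint64_t jt_addr = fw_mc_addr +
			   ALIGN(le32_to_cpu(hdr->header.ucode_size_bytes), PAGE_SIZE);
	uint32_t jt_size = le32_to_cpu(hdr->jt_size) << 2;	/* dwords -> bytes */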
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c index e3281d4e3e41..3af8ffb45b64 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c | |||
@@ -1517,88 +1517,6 @@ static const struct drm_connector_funcs amdgpu_connector_edp_funcs = { | |||
1517 | .force = amdgpu_connector_dvi_force, | 1517 | .force = amdgpu_connector_dvi_force, |
1518 | }; | 1518 | }; |
1519 | 1519 | ||
1520 | static struct drm_encoder * | ||
1521 | amdgpu_connector_virtual_encoder(struct drm_connector *connector) | ||
1522 | { | ||
1523 | int enc_id = connector->encoder_ids[0]; | ||
1524 | struct drm_encoder *encoder; | ||
1525 | int i; | ||
1526 | for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) { | ||
1527 | if (connector->encoder_ids[i] == 0) | ||
1528 | break; | ||
1529 | |||
1530 | encoder = drm_encoder_find(connector->dev, connector->encoder_ids[i]); | ||
1531 | if (!encoder) | ||
1532 | continue; | ||
1533 | |||
1534 | if (encoder->encoder_type == DRM_MODE_ENCODER_VIRTUAL) | ||
1535 | return encoder; | ||
1536 | } | ||
1537 | |||
1538 | /* pick the first one */ | ||
1539 | if (enc_id) | ||
1540 | return drm_encoder_find(connector->dev, enc_id); | ||
1541 | return NULL; | ||
1542 | } | ||
1543 | |||
1544 | static int amdgpu_connector_virtual_get_modes(struct drm_connector *connector) | ||
1545 | { | ||
1546 | struct drm_encoder *encoder = amdgpu_connector_best_single_encoder(connector); | ||
1547 | |||
1548 | if (encoder) { | ||
1549 | amdgpu_connector_add_common_modes(encoder, connector); | ||
1550 | } | ||
1551 | |||
1552 | return 0; | ||
1553 | } | ||
1554 | |||
1555 | static int amdgpu_connector_virtual_mode_valid(struct drm_connector *connector, | ||
1556 | struct drm_display_mode *mode) | ||
1557 | { | ||
1558 | return MODE_OK; | ||
1559 | } | ||
1560 | |||
1561 | static int | ||
1562 | amdgpu_connector_virtual_dpms(struct drm_connector *connector, int mode) | ||
1563 | { | ||
1564 | return 0; | ||
1565 | } | ||
1566 | |||
1567 | static enum drm_connector_status | ||
1568 | |||
1569 | amdgpu_connector_virtual_detect(struct drm_connector *connector, bool force) | ||
1570 | { | ||
1571 | return connector_status_connected; | ||
1572 | } | ||
1573 | |||
1574 | static int | ||
1575 | amdgpu_connector_virtual_set_property(struct drm_connector *connector, | ||
1576 | struct drm_property *property, | ||
1577 | uint64_t val) | ||
1578 | { | ||
1579 | return 0; | ||
1580 | } | ||
1581 | |||
1582 | static void amdgpu_connector_virtual_force(struct drm_connector *connector) | ||
1583 | { | ||
1584 | return; | ||
1585 | } | ||
1586 | |||
1587 | static const struct drm_connector_helper_funcs amdgpu_connector_virtual_helper_funcs = { | ||
1588 | .get_modes = amdgpu_connector_virtual_get_modes, | ||
1589 | .mode_valid = amdgpu_connector_virtual_mode_valid, | ||
1590 | .best_encoder = amdgpu_connector_virtual_encoder, | ||
1591 | }; | ||
1592 | |||
1593 | static const struct drm_connector_funcs amdgpu_connector_virtual_funcs = { | ||
1594 | .dpms = amdgpu_connector_virtual_dpms, | ||
1595 | .detect = amdgpu_connector_virtual_detect, | ||
1596 | .fill_modes = drm_helper_probe_single_connector_modes, | ||
1597 | .set_property = amdgpu_connector_virtual_set_property, | ||
1598 | .destroy = amdgpu_connector_destroy, | ||
1599 | .force = amdgpu_connector_virtual_force, | ||
1600 | }; | ||
1601 | |||
1602 | void | 1520 | void |
1603 | amdgpu_connector_add(struct amdgpu_device *adev, | 1521 | amdgpu_connector_add(struct amdgpu_device *adev, |
1604 | uint32_t connector_id, | 1522 | uint32_t connector_id, |
@@ -1983,17 +1901,6 @@ amdgpu_connector_add(struct amdgpu_device *adev, | |||
1983 | connector->interlace_allowed = false; | 1901 | connector->interlace_allowed = false; |
1984 | connector->doublescan_allowed = false; | 1902 | connector->doublescan_allowed = false; |
1985 | break; | 1903 | break; |
1986 | case DRM_MODE_CONNECTOR_VIRTUAL: | ||
1987 | amdgpu_dig_connector = kzalloc(sizeof(struct amdgpu_connector_atom_dig), GFP_KERNEL); | ||
1988 | if (!amdgpu_dig_connector) | ||
1989 | goto failed; | ||
1990 | amdgpu_connector->con_priv = amdgpu_dig_connector; | ||
1991 | drm_connector_init(dev, &amdgpu_connector->base, &amdgpu_connector_virtual_funcs, connector_type); | ||
1992 | drm_connector_helper_add(&amdgpu_connector->base, &amdgpu_connector_virtual_helper_funcs); | ||
1993 | subpixel_order = SubPixelHorizontalRGB; | ||
1994 | connector->interlace_allowed = false; | ||
1995 | connector->doublescan_allowed = false; | ||
1996 | break; | ||
1997 | } | 1904 | } |
1998 | } | 1905 | } |
1999 | 1906 | ||
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c index b0f6e6957536..cf03f9f01f40 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | |||
@@ -355,6 +355,7 @@ static void amdgpu_cs_report_moved_bytes(struct amdgpu_device *adev, | |||
355 | static int amdgpu_cs_bo_validate(struct amdgpu_cs_parser *p, | 355 | static int amdgpu_cs_bo_validate(struct amdgpu_cs_parser *p, |
356 | struct amdgpu_bo *bo) | 356 | struct amdgpu_bo *bo) |
357 | { | 357 | { |
358 | struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev); | ||
358 | u64 initial_bytes_moved; | 359 | u64 initial_bytes_moved; |
359 | uint32_t domain; | 360 | uint32_t domain; |
360 | int r; | 361 | int r; |
@@ -372,9 +373,9 @@ static int amdgpu_cs_bo_validate(struct amdgpu_cs_parser *p, | |||
372 | 373 | ||
373 | retry: | 374 | retry: |
374 | amdgpu_ttm_placement_from_domain(bo, domain); | 375 | amdgpu_ttm_placement_from_domain(bo, domain); |
375 | initial_bytes_moved = atomic64_read(&bo->adev->num_bytes_moved); | 376 | initial_bytes_moved = atomic64_read(&adev->num_bytes_moved); |
376 | r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false); | 377 | r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false); |
377 | p->bytes_moved += atomic64_read(&bo->adev->num_bytes_moved) - | 378 | p->bytes_moved += atomic64_read(&adev->num_bytes_moved) - |
378 | initial_bytes_moved; | 379 | initial_bytes_moved; |
379 | 380 | ||
380 | if (unlikely(r == -ENOMEM) && domain != bo->allowed_domains) { | 381 | if (unlikely(r == -ENOMEM) && domain != bo->allowed_domains) { |
@@ -387,9 +388,9 @@ retry: | |||
387 | 388 | ||
388 | /* Last resort, try to evict something from the current working set */ | 389 | /* Last resort, try to evict something from the current working set */ |
389 | static bool amdgpu_cs_try_evict(struct amdgpu_cs_parser *p, | 390 | static bool amdgpu_cs_try_evict(struct amdgpu_cs_parser *p, |
390 | struct amdgpu_bo_list_entry *lobj) | 391 | struct amdgpu_bo *validated) |
391 | { | 392 | { |
392 | uint32_t domain = lobj->robj->allowed_domains; | 393 | uint32_t domain = validated->allowed_domains; |
393 | int r; | 394 | int r; |
394 | 395 | ||
395 | if (!p->evictable) | 396 | if (!p->evictable) |
@@ -400,11 +401,12 @@ static bool amdgpu_cs_try_evict(struct amdgpu_cs_parser *p, | |||
400 | 401 | ||
401 | struct amdgpu_bo_list_entry *candidate = p->evictable; | 402 | struct amdgpu_bo_list_entry *candidate = p->evictable; |
402 | struct amdgpu_bo *bo = candidate->robj; | 403 | struct amdgpu_bo *bo = candidate->robj; |
404 | struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev); | ||
403 | u64 initial_bytes_moved; | 405 | u64 initial_bytes_moved; |
404 | uint32_t other; | 406 | uint32_t other; |
405 | 407 | ||
406 | /* If we reached our current BO we can forget it */ | 408 | /* If we reached our current BO we can forget it */ |
407 | if (candidate == lobj) | 409 | if (candidate->robj == validated) |
408 | break; | 410 | break; |
409 | 411 | ||
410 | other = amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type); | 412 | other = amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type); |
@@ -420,9 +422,9 @@ static bool amdgpu_cs_try_evict(struct amdgpu_cs_parser *p, | |||
420 | 422 | ||
421 | /* Good we can try to move this BO somewhere else */ | 423 | /* Good we can try to move this BO somewhere else */ |
422 | amdgpu_ttm_placement_from_domain(bo, other); | 424 | amdgpu_ttm_placement_from_domain(bo, other); |
423 | initial_bytes_moved = atomic64_read(&bo->adev->num_bytes_moved); | 425 | initial_bytes_moved = atomic64_read(&adev->num_bytes_moved); |
424 | r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false); | 426 | r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false); |
425 | p->bytes_moved += atomic64_read(&bo->adev->num_bytes_moved) - | 427 | p->bytes_moved += atomic64_read(&adev->num_bytes_moved) - |
426 | initial_bytes_moved; | 428 | initial_bytes_moved; |
427 | 429 | ||
428 | if (unlikely(r)) | 430 | if (unlikely(r)) |
@@ -437,6 +439,23 @@ static bool amdgpu_cs_try_evict(struct amdgpu_cs_parser *p, | |||
437 | return false; | 439 | return false; |
438 | } | 440 | } |
439 | 441 | ||
442 | static int amdgpu_cs_validate(void *param, struct amdgpu_bo *bo) | ||
443 | { | ||
444 | struct amdgpu_cs_parser *p = param; | ||
445 | int r; | ||
446 | |||
447 | do { | ||
448 | r = amdgpu_cs_bo_validate(p, bo); | ||
449 | } while (r == -ENOMEM && amdgpu_cs_try_evict(p, bo)); | ||
450 | if (r) | ||
451 | return r; | ||
452 | |||
453 | if (bo->shadow) | ||
454 | r = amdgpu_cs_bo_validate(p, bo); | ||
455 | |||
456 | return r; | ||
457 | } | ||
458 | |||
440 | static int amdgpu_cs_list_validate(struct amdgpu_cs_parser *p, | 459 | static int amdgpu_cs_list_validate(struct amdgpu_cs_parser *p, |
441 | struct list_head *validated) | 460 | struct list_head *validated) |
442 | { | 461 | { |
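The new amdgpu_cs_validate() wraps the evict-and-retry policy in a callback so that VM code can validate page-table BOs with the same logic as the CS BO list. A sketch of how a page-table walker might drive it; the list and field names here are illustrative, not the real amdgpu_vm internals:

	/* Hedged sketch: a walker invoking the validate callback per PT BO */
	static int example_validate_pt_bos(struct amdgpu_vm *vm,
					   int (*validate)(void *param,
							   struct amdgpu_bo *bo),
					   void *param)
	{
		struct example_vm_pt *entry;
		int r;

		list_for_each_entry(entry, &vm->example_pt_list, list) {
			r = validate(param, entry->bo);	/* amdgpu_cs_validate() */
			if (r)
				return r;
		}
		return 0;
	}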
@@ -464,18 +483,10 @@ static int amdgpu_cs_list_validate(struct amdgpu_cs_parser *p, | |||
464 | if (p->evictable == lobj) | 483 | if (p->evictable == lobj) |
465 | p->evictable = NULL; | 484 | p->evictable = NULL; |
466 | 485 | ||
467 | do { | 486 | r = amdgpu_cs_validate(p, bo); |
468 | r = amdgpu_cs_bo_validate(p, bo); | ||
469 | } while (r == -ENOMEM && amdgpu_cs_try_evict(p, lobj)); | ||
470 | if (r) | 487 | if (r) |
471 | return r; | 488 | return r; |
472 | 489 | ||
473 | if (bo->shadow) { | ||
474 | r = amdgpu_cs_bo_validate(p, bo); | ||
475 | if (r) | ||
476 | return r; | ||
477 | } | ||
478 | |||
479 | if (binding_userptr) { | 490 | if (binding_userptr) { |
480 | drm_free_large(lobj->user_pages); | 491 | drm_free_large(lobj->user_pages); |
481 | lobj->user_pages = NULL; | 492 | lobj->user_pages = NULL; |
@@ -593,14 +604,19 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p, | |||
593 | list_splice(&need_pages, &p->validated); | 604 | list_splice(&need_pages, &p->validated); |
594 | } | 605 | } |
595 | 606 | ||
596 | amdgpu_vm_get_pt_bos(p->adev, &fpriv->vm, &duplicates); | ||
597 | |||
598 | p->bytes_moved_threshold = amdgpu_cs_get_threshold_for_moves(p->adev); | 607 | p->bytes_moved_threshold = amdgpu_cs_get_threshold_for_moves(p->adev); |
599 | p->bytes_moved = 0; | 608 | p->bytes_moved = 0; |
600 | p->evictable = list_last_entry(&p->validated, | 609 | p->evictable = list_last_entry(&p->validated, |
601 | struct amdgpu_bo_list_entry, | 610 | struct amdgpu_bo_list_entry, |
602 | tv.head); | 611 | tv.head); |
603 | 612 | ||
613 | r = amdgpu_vm_validate_pt_bos(p->adev, &fpriv->vm, | ||
614 | amdgpu_cs_validate, p); | ||
615 | if (r) { | ||
616 | DRM_ERROR("amdgpu_vm_validate_pt_bos() failed.\n"); | ||
617 | goto error_validate; | ||
618 | } | ||
619 | |||
604 | r = amdgpu_cs_list_validate(p, &duplicates); | 620 | r = amdgpu_cs_list_validate(p, &duplicates); |
605 | if (r) { | 621 | if (r) { |
606 | DRM_ERROR("amdgpu_cs_list_validate(duplicates) failed.\n"); | 622 | DRM_ERROR("amdgpu_cs_list_validate(duplicates) failed.\n"); |
@@ -806,13 +822,14 @@ static int amdgpu_cs_ib_vm_chunk(struct amdgpu_device *adev, | |||
806 | 822 | ||
807 | /* Only for UVD/VCE VM emulation */ | 823 | /* Only for UVD/VCE VM emulation */ |
808 | if (ring->funcs->parse_cs) { | 824 | if (ring->funcs->parse_cs) { |
809 | p->job->vm = NULL; | ||
810 | for (i = 0; i < p->job->num_ibs; i++) { | 825 | for (i = 0; i < p->job->num_ibs; i++) { |
811 | r = amdgpu_ring_parse_cs(ring, p, i); | 826 | r = amdgpu_ring_parse_cs(ring, p, i); |
812 | if (r) | 827 | if (r) |
813 | return r; | 828 | return r; |
814 | } | 829 | } |
815 | } else { | 830 | } |
831 | |||
832 | if (p->job->vm) { | ||
816 | p->job->vm_pd_addr = amdgpu_bo_gpu_offset(vm->page_directory); | 833 | p->job->vm_pd_addr = amdgpu_bo_gpu_offset(vm->page_directory); |
817 | 834 | ||
818 | r = amdgpu_bo_vm_update_pte(p, vm); | 835 | r = amdgpu_bo_vm_update_pte(p, vm); |
@@ -901,7 +918,7 @@ static int amdgpu_cs_ib_fill(struct amdgpu_device *adev, | |||
901 | offset = ((uint64_t)m->it.start) * AMDGPU_GPU_PAGE_SIZE; | 918 | offset = ((uint64_t)m->it.start) * AMDGPU_GPU_PAGE_SIZE; |
902 | kptr += chunk_ib->va_start - offset; | 919 | kptr += chunk_ib->va_start - offset; |
903 | 920 | ||
904 | r = amdgpu_ib_get(adev, NULL, chunk_ib->ib_bytes, ib); | 921 | r = amdgpu_ib_get(adev, vm, chunk_ib->ib_bytes, ib); |
905 | if (r) { | 922 | if (r) { |
906 | DRM_ERROR("Failed to get ib !\n"); | 923 | DRM_ERROR("Failed to get ib !\n"); |
907 | return r; | 924 | return r; |
@@ -916,9 +933,9 @@ static int amdgpu_cs_ib_fill(struct amdgpu_device *adev, | |||
916 | return r; | 933 | return r; |
917 | } | 934 | } |
918 | 935 | ||
919 | ib->gpu_addr = chunk_ib->va_start; | ||
920 | } | 936 | } |
921 | 937 | ||
938 | ib->gpu_addr = chunk_ib->va_start; | ||
922 | ib->length_dw = chunk_ib->ib_bytes / 4; | 939 | ib->length_dw = chunk_ib->ib_bytes / 4; |
923 | ib->flags = chunk_ib->flags; | 940 | ib->flags = chunk_ib->flags; |
924 | j++; | 941 | j++; |
@@ -926,8 +943,8 @@ static int amdgpu_cs_ib_fill(struct amdgpu_device *adev, | |||
926 | 943 | ||
927 | /* UVD & VCE fw doesn't support user fences */ | 944 | /* UVD & VCE fw doesn't support user fences */ |
928 | if (parser->job->uf_addr && ( | 945 | if (parser->job->uf_addr && ( |
929 | parser->job->ring->type == AMDGPU_RING_TYPE_UVD || | 946 | parser->job->ring->funcs->type == AMDGPU_RING_TYPE_UVD || |
930 | parser->job->ring->type == AMDGPU_RING_TYPE_VCE)) | 947 | parser->job->ring->funcs->type == AMDGPU_RING_TYPE_VCE)) |
931 | return -EINVAL; | 948 | return -EINVAL; |
932 | 949 | ||
933 | return 0; | 950 | return 0; |
@@ -1195,6 +1212,15 @@ int amdgpu_cs_sysvm_access_required(struct amdgpu_cs_parser *parser) | |||
1195 | r = amdgpu_ttm_bind(&bo->tbo, &bo->tbo.mem); | 1212 | r = amdgpu_ttm_bind(&bo->tbo, &bo->tbo.mem); |
1196 | if (unlikely(r)) | 1213 | if (unlikely(r)) |
1197 | return r; | 1214 | return r; |
1215 | |||
1216 | if (bo->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS) | ||
1217 | continue; | ||
1218 | |||
1219 | bo->flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS; | ||
1220 | amdgpu_ttm_placement_from_domain(bo, bo->allowed_domains); | ||
1221 | r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false); | ||
1222 | if (unlikely(r)) | ||
1223 | return r; | ||
1198 | } | 1224 | } |
1199 | 1225 | ||
1200 | return 0; | 1226 | return 0; |
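The forced AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS plus revalidation above is presumably needed because UVD/VCE command parsing addresses the BO through a single GPU offset, which only works if the allocation occupies one contiguous VRAM range. With a split-capable VRAM manager, the distinction is roughly:

	/* Hedged sketch: without VRAM_CONTIGUOUS an allocation may be split
	 * into multiple nodes of at most amdgpu_vram_page_split pages each;
	 * simplified relative to the real VRAM manager. */
	unsigned num_nodes = (flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS) ?
			     1 : DIV_ROUND_UP(num_pages, amdgpu_vram_page_split);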
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c index a5e2fcbef0f0..6d86eaef934c 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c | |||
@@ -55,18 +55,18 @@ static int amdgpu_ctx_init(struct amdgpu_device *adev, struct amdgpu_ctx *ctx) | |||
55 | r = amd_sched_entity_init(&ring->sched, &ctx->rings[i].entity, | 55 | r = amd_sched_entity_init(&ring->sched, &ctx->rings[i].entity, |
56 | rq, amdgpu_sched_jobs); | 56 | rq, amdgpu_sched_jobs); |
57 | if (r) | 57 | if (r) |
58 | break; | 58 | goto failed; |
59 | } | 59 | } |
60 | 60 | ||
61 | if (i < adev->num_rings) { | ||
62 | for (j = 0; j < i; j++) | ||
63 | amd_sched_entity_fini(&adev->rings[j]->sched, | ||
64 | &ctx->rings[j].entity); | ||
65 | kfree(ctx->fences); | ||
66 | ctx->fences = NULL; | ||
67 | return r; | ||
68 | } | ||
69 | return 0; | 61 | return 0; |
62 | |||
63 | failed: | ||
64 | for (j = 0; j < i; j++) | ||
65 | amd_sched_entity_fini(&adev->rings[j]->sched, | ||
66 | &ctx->rings[j].entity); | ||
67 | kfree(ctx->fences); | ||
68 | ctx->fences = NULL; | ||
69 | return r; | ||
70 | } | 70 | } |
71 | 71 | ||
72 | static void amdgpu_ctx_fini(struct amdgpu_ctx *ctx) | 72 | static void amdgpu_ctx_fini(struct amdgpu_ctx *ctx) |
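The amdgpu_ctx_init() rework above replaces the post-loop "did we finish?" check with the usual kernel goto-unwind pattern. Schematically, with generic names:

	/* Hedged sketch of the unwind shape the rework adopts */
	for (i = 0; i < n; i++) {
		r = init_one(i);
		if (r)
			goto failed;
	}
	return 0;

	failed:
	for (j = 0; j < i; j++)
		fini_one(j);	/* tear down only what was set up */
	return r;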
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index b4f4a9239069..3b9b58debabd 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | |||
@@ -264,7 +264,8 @@ static int amdgpu_vram_scratch_init(struct amdgpu_device *adev) | |||
264 | if (adev->vram_scratch.robj == NULL) { | 264 | if (adev->vram_scratch.robj == NULL) { |
265 | r = amdgpu_bo_create(adev, AMDGPU_GPU_PAGE_SIZE, | 265 | r = amdgpu_bo_create(adev, AMDGPU_GPU_PAGE_SIZE, |
266 | PAGE_SIZE, true, AMDGPU_GEM_DOMAIN_VRAM, | 266 | PAGE_SIZE, true, AMDGPU_GEM_DOMAIN_VRAM, |
267 | AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED, | 267 | AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED | |
268 | AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS, | ||
268 | NULL, NULL, &adev->vram_scratch.robj); | 269 | NULL, NULL, &adev->vram_scratch.robj); |
269 | if (r) { | 270 | if (r) { |
270 | return r; | 271 | return r; |
@@ -442,13 +443,9 @@ void amdgpu_doorbell_get_kfd_info(struct amdgpu_device *adev, | |||
442 | static void amdgpu_wb_fini(struct amdgpu_device *adev) | 443 | static void amdgpu_wb_fini(struct amdgpu_device *adev) |
443 | { | 444 | { |
444 | if (adev->wb.wb_obj) { | 445 | if (adev->wb.wb_obj) { |
445 | if (!amdgpu_bo_reserve(adev->wb.wb_obj, false)) { | 446 | amdgpu_bo_free_kernel(&adev->wb.wb_obj, |
446 | amdgpu_bo_kunmap(adev->wb.wb_obj); | 447 | &adev->wb.gpu_addr, |
447 | amdgpu_bo_unpin(adev->wb.wb_obj); | 448 | (void **)&adev->wb.wb); |
448 | amdgpu_bo_unreserve(adev->wb.wb_obj); | ||
449 | } | ||
450 | amdgpu_bo_unref(&adev->wb.wb_obj); | ||
451 | adev->wb.wb = NULL; | ||
452 | adev->wb.wb_obj = NULL; | 449 | adev->wb.wb_obj = NULL; |
453 | } | 450 | } |
454 | } | 451 | } |
@@ -467,33 +464,14 @@ static int amdgpu_wb_init(struct amdgpu_device *adev) | |||
467 | int r; | 464 | int r; |
468 | 465 | ||
469 | if (adev->wb.wb_obj == NULL) { | 466 | if (adev->wb.wb_obj == NULL) { |
470 | r = amdgpu_bo_create(adev, AMDGPU_MAX_WB * 4, PAGE_SIZE, true, | 467 | r = amdgpu_bo_create_kernel(adev, AMDGPU_MAX_WB * 4, |
471 | AMDGPU_GEM_DOMAIN_GTT, 0, NULL, NULL, | 468 | PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT, |
472 | &adev->wb.wb_obj); | 469 | &adev->wb.wb_obj, &adev->wb.gpu_addr, |
470 | (void **)&adev->wb.wb); | ||
473 | if (r) { | 471 | if (r) { |
474 | dev_warn(adev->dev, "(%d) create WB bo failed\n", r); | 472 | dev_warn(adev->dev, "(%d) create WB bo failed\n", r); |
475 | return r; | 473 | return r; |
476 | } | 474 | } |
477 | r = amdgpu_bo_reserve(adev->wb.wb_obj, false); | ||
478 | if (unlikely(r != 0)) { | ||
479 | amdgpu_wb_fini(adev); | ||
480 | return r; | ||
481 | } | ||
482 | r = amdgpu_bo_pin(adev->wb.wb_obj, AMDGPU_GEM_DOMAIN_GTT, | ||
483 | &adev->wb.gpu_addr); | ||
484 | if (r) { | ||
485 | amdgpu_bo_unreserve(adev->wb.wb_obj); | ||
486 | dev_warn(adev->dev, "(%d) pin WB bo failed\n", r); | ||
487 | amdgpu_wb_fini(adev); | ||
488 | return r; | ||
489 | } | ||
490 | r = amdgpu_bo_kmap(adev->wb.wb_obj, (void **)&adev->wb.wb); | ||
491 | amdgpu_bo_unreserve(adev->wb.wb_obj); | ||
492 | if (r) { | ||
493 | dev_warn(adev->dev, "(%d) map WB bo failed\n", r); | ||
494 | amdgpu_wb_fini(adev); | ||
495 | return r; | ||
496 | } | ||
497 | 475 | ||
498 | adev->wb.num_wb = AMDGPU_MAX_WB; | 476 | adev->wb.num_wb = AMDGPU_MAX_WB; |
499 | memset(&adev->wb.used, 0, sizeof(adev->wb.used)); | 477 | memset(&adev->wb.used, 0, sizeof(adev->wb.used)); |
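amdgpu_bo_create_kernel() collapses the create/reserve/pin/kmap sequence that the old writeback code open-coded. Roughly, with all the per-step error unwinding elided:

	/* Hedged sketch of the sequence amdgpu_bo_create_kernel() wraps;
	 * the real helper cleans up properly when any step fails. */
	r = amdgpu_bo_create(adev, size, align, true, domain, 0,
			     NULL, NULL, bo_ptr);
	r = amdgpu_bo_reserve(*bo_ptr, false);
	r = amdgpu_bo_pin(*bo_ptr, domain, gpu_addr);
	r = amdgpu_bo_kmap(*bo_ptr, cpu_addr);
	amdgpu_bo_unreserve(*bo_ptr);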
@@ -1051,6 +1029,13 @@ static void amdgpu_check_arguments(struct amdgpu_device *adev) | |||
1051 | amdgpu_vm_block_size); | 1029 | amdgpu_vm_block_size); |
1052 | amdgpu_vm_block_size = 9; | 1030 | amdgpu_vm_block_size = 9; |
1053 | } | 1031 | } |
1032 | |||
1033 | if ((amdgpu_vram_page_split != -1 && amdgpu_vram_page_split < 16) || | ||
1034 | !amdgpu_check_pot_argument(amdgpu_vram_page_split)) { | ||
1035 | dev_warn(adev->dev, "invalid VRAM page split (%d)\n", | ||
1036 | amdgpu_vram_page_split); | ||
1037 | amdgpu_vram_page_split = 1024; | ||
1038 | } | ||
1054 | } | 1039 | } |
1055 | 1040 | ||
1056 | /** | 1041 | /** |
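The new amdgpu_vram_page_split validation above falls back to 1024 when the value is neither -1 (disabled), at least 16, nor a power of two. amdgpu_check_pot_argument() is the driver's power-of-two test; a check of that form looks like:

	/* Hedged sketch of a power-of-two test in the style of
	 * amdgpu_check_pot_argument() (exact definition not shown here). */
	static bool example_check_pot(int arg)
	{
		return (arg & (arg - 1)) == 0;
	}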
@@ -1125,11 +1110,11 @@ int amdgpu_set_clockgating_state(struct amdgpu_device *adev, | |||
1125 | int i, r = 0; | 1110 | int i, r = 0; |
1126 | 1111 | ||
1127 | for (i = 0; i < adev->num_ip_blocks; i++) { | 1112 | for (i = 0; i < adev->num_ip_blocks; i++) { |
1128 | if (!adev->ip_block_status[i].valid) | 1113 | if (!adev->ip_blocks[i].status.valid) |
1129 | continue; | 1114 | continue; |
1130 | if (adev->ip_blocks[i].type == block_type) { | 1115 | if (adev->ip_blocks[i].version->type == block_type) { |
1131 | r = adev->ip_blocks[i].funcs->set_clockgating_state((void *)adev, | 1116 | r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev, |
1132 | state); | 1117 | state); |
1133 | if (r) | 1118 | if (r) |
1134 | return r; | 1119 | return r; |
1135 | break; | 1120 | break; |
@@ -1145,11 +1130,11 @@ int amdgpu_set_powergating_state(struct amdgpu_device *adev, | |||
1145 | int i, r = 0; | 1130 | int i, r = 0; |
1146 | 1131 | ||
1147 | for (i = 0; i < adev->num_ip_blocks; i++) { | 1132 | for (i = 0; i < adev->num_ip_blocks; i++) { |
1148 | if (!adev->ip_block_status[i].valid) | 1133 | if (!adev->ip_blocks[i].status.valid) |
1149 | continue; | 1134 | continue; |
1150 | if (adev->ip_blocks[i].type == block_type) { | 1135 | if (adev->ip_blocks[i].version->type == block_type) { |
1151 | r = adev->ip_blocks[i].funcs->set_powergating_state((void *)adev, | 1136 | r = adev->ip_blocks[i].version->funcs->set_powergating_state((void *)adev, |
1152 | state); | 1137 | state); |
1153 | if (r) | 1138 | if (r) |
1154 | return r; | 1139 | return r; |
1155 | break; | 1140 | break; |
@@ -1164,10 +1149,10 @@ int amdgpu_wait_for_idle(struct amdgpu_device *adev, | |||
1164 | int i, r; | 1149 | int i, r; |
1165 | 1150 | ||
1166 | for (i = 0; i < adev->num_ip_blocks; i++) { | 1151 | for (i = 0; i < adev->num_ip_blocks; i++) { |
1167 | if (!adev->ip_block_status[i].valid) | 1152 | if (!adev->ip_blocks[i].status.valid) |
1168 | continue; | 1153 | continue; |
1169 | if (adev->ip_blocks[i].type == block_type) { | 1154 | if (adev->ip_blocks[i].version->type == block_type) { |
1170 | r = adev->ip_blocks[i].funcs->wait_for_idle((void *)adev); | 1155 | r = adev->ip_blocks[i].version->funcs->wait_for_idle((void *)adev); |
1171 | if (r) | 1156 | if (r) |
1172 | return r; | 1157 | return r; |
1173 | break; | 1158 | break; |
@@ -1183,23 +1168,22 @@ bool amdgpu_is_idle(struct amdgpu_device *adev, | |||
1183 | int i; | 1168 | int i; |
1184 | 1169 | ||
1185 | for (i = 0; i < adev->num_ip_blocks; i++) { | 1170 | for (i = 0; i < adev->num_ip_blocks; i++) { |
1186 | if (!adev->ip_block_status[i].valid) | 1171 | if (!adev->ip_blocks[i].status.valid) |
1187 | continue; | 1172 | continue; |
1188 | if (adev->ip_blocks[i].type == block_type) | 1173 | if (adev->ip_blocks[i].version->type == block_type) |
1189 | return adev->ip_blocks[i].funcs->is_idle((void *)adev); | 1174 | return adev->ip_blocks[i].version->funcs->is_idle((void *)adev); |
1190 | } | 1175 | } |
1191 | return true; | 1176 | return true; |
1192 | 1177 | ||
1193 | } | 1178 | } |
1194 | 1179 | ||
1195 | const struct amdgpu_ip_block_version * amdgpu_get_ip_block( | 1180 | struct amdgpu_ip_block * amdgpu_get_ip_block(struct amdgpu_device *adev, |
1196 | struct amdgpu_device *adev, | 1181 | enum amd_ip_block_type type) |
1197 | enum amd_ip_block_type type) | ||
1198 | { | 1182 | { |
1199 | int i; | 1183 | int i; |
1200 | 1184 | ||
1201 | for (i = 0; i < adev->num_ip_blocks; i++) | 1185 | for (i = 0; i < adev->num_ip_blocks; i++) |
1202 | if (adev->ip_blocks[i].type == type) | 1186 | if (adev->ip_blocks[i].version->type == type) |
1203 | return &adev->ip_blocks[i]; | 1187 | return &adev->ip_blocks[i]; |
1204 | 1188 | ||
1205 | return NULL; | 1189 | return NULL; |
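Across these amdgpu_device.c hunks, the per-IP bookkeeping moves from the parallel ip_blocks[]/ip_block_status[] arrays into a single struct per block. Inferred from the accesses above (status.valid, status.hw, version->type, version->funcs, and so on), the new shape is roughly:

	/* Hedged sketch of the reworked IP block bookkeeping; field names
	 * match the accesses in the hunks, layout details simplified. */
	struct amdgpu_ip_block_status {
		bool valid;
		bool sw;
		bool hw;
		bool late_initialized;
		bool hang;
	};

	struct amdgpu_ip_block {
		struct amdgpu_ip_block_status status;
		const struct amdgpu_ip_block_version *version;
	};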
@@ -1220,38 +1204,75 @@ int amdgpu_ip_block_version_cmp(struct amdgpu_device *adev, | |||
1220 | enum amd_ip_block_type type, | 1204 | enum amd_ip_block_type type, |
1221 | u32 major, u32 minor) | 1205 | u32 major, u32 minor) |
1222 | { | 1206 | { |
1223 | const struct amdgpu_ip_block_version *ip_block; | 1207 | struct amdgpu_ip_block *ip_block = amdgpu_get_ip_block(adev, type); |
1224 | ip_block = amdgpu_get_ip_block(adev, type); | ||
1225 | 1208 | ||
1226 | if (ip_block && ((ip_block->major > major) || | 1209 | if (ip_block && ((ip_block->version->major > major) || |
1227 | ((ip_block->major == major) && | 1210 | ((ip_block->version->major == major) && |
1228 | (ip_block->minor >= minor)))) | 1211 | (ip_block->version->minor >= minor)))) |
1229 | return 0; | 1212 | return 0; |
1230 | 1213 | ||
1231 | return 1; | 1214 | return 1; |
1232 | } | 1215 | } |
1233 | 1216 | ||
1234 | static void amdgpu_whether_enable_virtual_display(struct amdgpu_device *adev) | 1217 | /** |
1218 | * amdgpu_ip_block_add | ||
1219 | * | ||
1220 | * @adev: amdgpu_device pointer | ||
1221 | * @ip_block_version: pointer to the IP to add | ||
1222 | * | ||
1223 | * Adds the IP block driver information to the collection of IPs | ||
1224 | * on the asic. | ||
1225 | */ | ||
1226 | int amdgpu_ip_block_add(struct amdgpu_device *adev, | ||
1227 | const struct amdgpu_ip_block_version *ip_block_version) | ||
1228 | { | ||
1229 | if (!ip_block_version) | ||
1230 | return -EINVAL; | ||
1231 | |||
1232 | adev->ip_blocks[adev->num_ip_blocks++].version = ip_block_version; | ||
1233 | |||
1234 | return 0; | ||
1235 | } | ||
1236 | |||
1237 | static void amdgpu_device_enable_virtual_display(struct amdgpu_device *adev) | ||
1235 | { | 1238 | { |
1236 | adev->enable_virtual_display = false; | 1239 | adev->enable_virtual_display = false; |
1237 | 1240 | ||
1238 | if (amdgpu_virtual_display) { | 1241 | if (amdgpu_virtual_display) { |
1239 | struct drm_device *ddev = adev->ddev; | 1242 | struct drm_device *ddev = adev->ddev; |
1240 | const char *pci_address_name = pci_name(ddev->pdev); | 1243 | const char *pci_address_name = pci_name(ddev->pdev); |
1241 | char *pciaddstr, *pciaddstr_tmp, *pciaddname; | 1244 | char *pciaddstr, *pciaddstr_tmp, *pciaddname_tmp, *pciaddname; |
1242 | 1245 | ||
1243 | pciaddstr = kstrdup(amdgpu_virtual_display, GFP_KERNEL); | 1246 | pciaddstr = kstrdup(amdgpu_virtual_display, GFP_KERNEL); |
1244 | pciaddstr_tmp = pciaddstr; | 1247 | pciaddstr_tmp = pciaddstr; |
1245 | while ((pciaddname = strsep(&pciaddstr_tmp, ";"))) { | 1248 | while ((pciaddname_tmp = strsep(&pciaddstr_tmp, ";"))) { |
1249 | pciaddname = strsep(&pciaddname_tmp, ","); | ||
1246 | if (!strcmp(pci_address_name, pciaddname)) { | 1250 | if (!strcmp(pci_address_name, pciaddname)) { |
1251 | long num_crtc; | ||
1252 | int res = -1; | ||
1253 | |||
1247 | adev->enable_virtual_display = true; | 1254 | adev->enable_virtual_display = true; |
1255 | |||
1256 | if (pciaddname_tmp) | ||
1257 | res = kstrtol(pciaddname_tmp, 10, | ||
1258 | &num_crtc); | ||
1259 | |||
1260 | if (!res) { | ||
1261 | if (num_crtc < 1) | ||
1262 | num_crtc = 1; | ||
1263 | if (num_crtc > 6) | ||
1264 | num_crtc = 6; | ||
1265 | adev->mode_info.num_crtc = num_crtc; | ||
1266 | } else { | ||
1267 | adev->mode_info.num_crtc = 1; | ||
1268 | } | ||
1248 | break; | 1269 | break; |
1249 | } | 1270 | } |
1250 | } | 1271 | } |
1251 | 1272 | ||
1252 | DRM_INFO("virtual display string:%s, %s:virtual_display:%d\n", | 1273 | DRM_INFO("virtual display string:%s, %s:virtual_display:%d, num_crtc:%d\n", |
1253 | amdgpu_virtual_display, pci_address_name, | 1274 | amdgpu_virtual_display, pci_address_name, |
1254 | adev->enable_virtual_display); | 1275 | adev->enable_virtual_display, adev->mode_info.num_crtc); |
1255 | 1276 | ||
1256 | kfree(pciaddstr); | 1277 | kfree(pciaddstr); |
1257 | } | 1278 | } |
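The virtual display option now carries an optional per-device CRTC count, clamped to the range 1..6. A sketch of the extended grammar and its strsep()-based parsing, with illustrative PCI addresses:

	/* Hedged sketch of the new option grammar:
	 *   amdgpu.virtual_display=<pci>[,<num_crtc>][;<pci>[,<num_crtc>]...]
	 * e.g. "0000:26:00.0,2;0000:07:00.0,1" (addresses illustrative). */
	while ((entry = strsep(&list, ";"))) {
		pciaddname = strsep(&entry, ",");  /* entry now holds the count */
		if (entry && !kstrtol(entry, 10, &num_crtc))
			num_crtc = clamp_t(long, num_crtc, 1, 6);
	}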
@@ -1261,7 +1282,7 @@ static int amdgpu_early_init(struct amdgpu_device *adev) | |||
1261 | { | 1282 | { |
1262 | int i, r; | 1283 | int i, r; |
1263 | 1284 | ||
1264 | amdgpu_whether_enable_virtual_display(adev); | 1285 | amdgpu_device_enable_virtual_display(adev); |
1265 | 1286 | ||
1266 | switch (adev->asic_type) { | 1287 | switch (adev->asic_type) { |
1267 | case CHIP_TOPAZ: | 1288 | case CHIP_TOPAZ: |
@@ -1313,33 +1334,24 @@ static int amdgpu_early_init(struct amdgpu_device *adev) | |||
1313 | return -EINVAL; | 1334 | return -EINVAL; |
1314 | } | 1335 | } |
1315 | 1336 | ||
1316 | adev->ip_block_status = kcalloc(adev->num_ip_blocks, | ||
1317 | sizeof(struct amdgpu_ip_block_status), GFP_KERNEL); | ||
1318 | if (adev->ip_block_status == NULL) | ||
1319 | return -ENOMEM; | ||
1320 | |||
1321 | if (adev->ip_blocks == NULL) { | ||
1322 | DRM_ERROR("No IP blocks found!\n"); | ||
1323 | return r; | ||
1324 | } | ||
1325 | |||
1326 | for (i = 0; i < adev->num_ip_blocks; i++) { | 1337 | for (i = 0; i < adev->num_ip_blocks; i++) { |
1327 | if ((amdgpu_ip_block_mask & (1 << i)) == 0) { | 1338 | if ((amdgpu_ip_block_mask & (1 << i)) == 0) { |
1328 | DRM_ERROR("disabled ip block: %d\n", i); | 1339 | DRM_ERROR("disabled ip block: %d\n", i); |
1329 | adev->ip_block_status[i].valid = false; | 1340 | adev->ip_blocks[i].status.valid = false; |
1330 | } else { | 1341 | } else { |
1331 | if (adev->ip_blocks[i].funcs->early_init) { | 1342 | if (adev->ip_blocks[i].version->funcs->early_init) { |
1332 | r = adev->ip_blocks[i].funcs->early_init((void *)adev); | 1343 | r = adev->ip_blocks[i].version->funcs->early_init((void *)adev); |
1333 | if (r == -ENOENT) { | 1344 | if (r == -ENOENT) { |
1334 | adev->ip_block_status[i].valid = false; | 1345 | adev->ip_blocks[i].status.valid = false; |
1335 | } else if (r) { | 1346 | } else if (r) { |
1336 | DRM_ERROR("early_init of IP block <%s> failed %d\n", adev->ip_blocks[i].funcs->name, r); | 1347 | DRM_ERROR("early_init of IP block <%s> failed %d\n", |
1348 | adev->ip_blocks[i].version->funcs->name, r); | ||
1337 | return r; | 1349 | return r; |
1338 | } else { | 1350 | } else { |
1339 | adev->ip_block_status[i].valid = true; | 1351 | adev->ip_blocks[i].status.valid = true; |
1340 | } | 1352 | } |
1341 | } else { | 1353 | } else { |
1342 | adev->ip_block_status[i].valid = true; | 1354 | adev->ip_blocks[i].status.valid = true; |
1343 | } | 1355 | } |
1344 | } | 1356 | } |
1345 | } | 1357 | } |
@@ -1355,22 +1367,23 @@ static int amdgpu_init(struct amdgpu_device *adev) | |||
1355 | int i, r; | 1367 | int i, r; |
1356 | 1368 | ||
1357 | for (i = 0; i < adev->num_ip_blocks; i++) { | 1369 | for (i = 0; i < adev->num_ip_blocks; i++) { |
1358 | if (!adev->ip_block_status[i].valid) | 1370 | if (!adev->ip_blocks[i].status.valid) |
1359 | continue; | 1371 | continue; |
1360 | r = adev->ip_blocks[i].funcs->sw_init((void *)adev); | 1372 | r = adev->ip_blocks[i].version->funcs->sw_init((void *)adev); |
1361 | if (r) { | 1373 | if (r) { |
1362 | DRM_ERROR("sw_init of IP block <%s> failed %d\n", adev->ip_blocks[i].funcs->name, r); | 1374 | DRM_ERROR("sw_init of IP block <%s> failed %d\n", |
1375 | adev->ip_blocks[i].version->funcs->name, r); | ||
1363 | return r; | 1376 | return r; |
1364 | } | 1377 | } |
1365 | adev->ip_block_status[i].sw = true; | 1378 | adev->ip_blocks[i].status.sw = true; |
1366 | /* need to do gmc hw init early so we can allocate gpu mem */ | 1379 | /* need to do gmc hw init early so we can allocate gpu mem */ |
1367 | if (adev->ip_blocks[i].type == AMD_IP_BLOCK_TYPE_GMC) { | 1380 | if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) { |
1368 | r = amdgpu_vram_scratch_init(adev); | 1381 | r = amdgpu_vram_scratch_init(adev); |
1369 | if (r) { | 1382 | if (r) { |
1370 | DRM_ERROR("amdgpu_vram_scratch_init failed %d\n", r); | 1383 | DRM_ERROR("amdgpu_vram_scratch_init failed %d\n", r); |
1371 | return r; | 1384 | return r; |
1372 | } | 1385 | } |
1373 | r = adev->ip_blocks[i].funcs->hw_init((void *)adev); | 1386 | r = adev->ip_blocks[i].version->funcs->hw_init((void *)adev); |
1374 | if (r) { | 1387 | if (r) { |
1375 | DRM_ERROR("hw_init %d failed %d\n", i, r); | 1388 | DRM_ERROR("hw_init %d failed %d\n", i, r); |
1376 | return r; | 1389 | return r; |
@@ -1380,22 +1393,23 @@ static int amdgpu_init(struct amdgpu_device *adev) | |||
1380 | DRM_ERROR("amdgpu_wb_init failed %d\n", r); | 1393 | DRM_ERROR("amdgpu_wb_init failed %d\n", r); |
1381 | return r; | 1394 | return r; |
1382 | } | 1395 | } |
1383 | adev->ip_block_status[i].hw = true; | 1396 | adev->ip_blocks[i].status.hw = true; |
1384 | } | 1397 | } |
1385 | } | 1398 | } |
1386 | 1399 | ||
1387 | for (i = 0; i < adev->num_ip_blocks; i++) { | 1400 | for (i = 0; i < adev->num_ip_blocks; i++) { |
1388 | if (!adev->ip_block_status[i].sw) | 1401 | if (!adev->ip_blocks[i].status.sw) |
1389 | continue; | 1402 | continue; |
1390 | /* gmc hw init is done early */ | 1403 | /* gmc hw init is done early */ |
1391 | if (adev->ip_blocks[i].type == AMD_IP_BLOCK_TYPE_GMC) | 1404 | if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) |
1392 | continue; | 1405 | continue; |
1393 | r = adev->ip_blocks[i].funcs->hw_init((void *)adev); | 1406 | r = adev->ip_blocks[i].version->funcs->hw_init((void *)adev); |
1394 | if (r) { | 1407 | if (r) { |
1395 | DRM_ERROR("hw_init of IP block <%s> failed %d\n", adev->ip_blocks[i].funcs->name, r); | 1408 | DRM_ERROR("hw_init of IP block <%s> failed %d\n", |
1409 | adev->ip_blocks[i].version->funcs->name, r); | ||
1396 | return r; | 1410 | return r; |
1397 | } | 1411 | } |
1398 | adev->ip_block_status[i].hw = true; | 1412 | adev->ip_blocks[i].status.hw = true; |
1399 | } | 1413 | } |
1400 | 1414 | ||
1401 | return 0; | 1415 | return 0; |
@@ -1406,25 +1420,26 @@ static int amdgpu_late_init(struct amdgpu_device *adev) | |||
1406 | int i = 0, r; | 1420 | int i = 0, r; |
1407 | 1421 | ||
1408 | for (i = 0; i < adev->num_ip_blocks; i++) { | 1422 | for (i = 0; i < adev->num_ip_blocks; i++) { |
1409 | if (!adev->ip_block_status[i].valid) | 1423 | if (!adev->ip_blocks[i].status.valid) |
1410 | continue; | 1424 | continue; |
1411 | if (adev->ip_blocks[i].funcs->late_init) { | 1425 | if (adev->ip_blocks[i].version->funcs->late_init) { |
1412 | r = adev->ip_blocks[i].funcs->late_init((void *)adev); | 1426 | r = adev->ip_blocks[i].version->funcs->late_init((void *)adev); |
1413 | if (r) { | 1427 | if (r) { |
1414 | DRM_ERROR("late_init of IP block <%s> failed %d\n", adev->ip_blocks[i].funcs->name, r); | 1428 | DRM_ERROR("late_init of IP block <%s> failed %d\n", |
1429 | adev->ip_blocks[i].version->funcs->name, r); | ||
1415 | return r; | 1430 | return r; |
1416 | } | 1431 | } |
1417 | adev->ip_block_status[i].late_initialized = true; | 1432 | adev->ip_blocks[i].status.late_initialized = true; |
1418 | } | 1433 | } |
1419 | /* skip CG for VCE/UVD, it's handled specially */ | 1434 | /* skip CG for VCE/UVD, it's handled specially */ |
1420 | if (adev->ip_blocks[i].type != AMD_IP_BLOCK_TYPE_UVD && | 1435 | if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD && |
1421 | adev->ip_blocks[i].type != AMD_IP_BLOCK_TYPE_VCE) { | 1436 | adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE) { |
1422 | /* enable clockgating to save power */ | 1437 | /* enable clockgating to save power */ |
1423 | r = adev->ip_blocks[i].funcs->set_clockgating_state((void *)adev, | 1438 | r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev, |
1424 | AMD_CG_STATE_GATE); | 1439 | AMD_CG_STATE_GATE); |
1425 | if (r) { | 1440 | if (r) { |
1426 | DRM_ERROR("set_clockgating_state(gate) of IP block <%s> failed %d\n", | 1441 | DRM_ERROR("set_clockgating_state(gate) of IP block <%s> failed %d\n", |
1427 | adev->ip_blocks[i].funcs->name, r); | 1442 | adev->ip_blocks[i].version->funcs->name, r); |
1428 | return r; | 1443 | return r; |
1429 | } | 1444 | } |
1430 | } | 1445 | } |
@@ -1439,68 +1454,71 @@ static int amdgpu_fini(struct amdgpu_device *adev) | |||
1439 | 1454 | ||
1440 | /* need to disable SMC first */ | 1455 | /* need to disable SMC first */ |
1441 | for (i = 0; i < adev->num_ip_blocks; i++) { | 1456 | for (i = 0; i < adev->num_ip_blocks; i++) { |
1442 | if (!adev->ip_block_status[i].hw) | 1457 | if (!adev->ip_blocks[i].status.hw) |
1443 | continue; | 1458 | continue; |
1444 | if (adev->ip_blocks[i].type == AMD_IP_BLOCK_TYPE_SMC) { | 1459 | if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) { |
1445 | /* ungate blocks before hw fini so that we can shutdown the blocks safely */ | 1460 | /* ungate blocks before hw fini so that we can shutdown the blocks safely */ |
1446 | r = adev->ip_blocks[i].funcs->set_clockgating_state((void *)adev, | 1461 | r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev, |
1447 | AMD_CG_STATE_UNGATE); | 1462 | AMD_CG_STATE_UNGATE); |
1448 | if (r) { | 1463 | if (r) { |
1449 | DRM_ERROR("set_clockgating_state(ungate) of IP block <%s> failed %d\n", | 1464 | DRM_ERROR("set_clockgating_state(ungate) of IP block <%s> failed %d\n", |
1450 | adev->ip_blocks[i].funcs->name, r); | 1465 | adev->ip_blocks[i].version->funcs->name, r); |
1451 | return r; | 1466 | return r; |
1452 | } | 1467 | } |
1453 | r = adev->ip_blocks[i].funcs->hw_fini((void *)adev); | 1468 | r = adev->ip_blocks[i].version->funcs->hw_fini((void *)adev); |
1454 | /* XXX handle errors */ | 1469 | /* XXX handle errors */ |
1455 | if (r) { | 1470 | if (r) { |
1456 | DRM_DEBUG("hw_fini of IP block <%s> failed %d\n", | 1471 | DRM_DEBUG("hw_fini of IP block <%s> failed %d\n", |
1457 | adev->ip_blocks[i].funcs->name, r); | 1472 | adev->ip_blocks[i].version->funcs->name, r); |
1458 | } | 1473 | } |
1459 | adev->ip_block_status[i].hw = false; | 1474 | adev->ip_blocks[i].status.hw = false; |
1460 | break; | 1475 | break; |
1461 | } | 1476 | } |
1462 | } | 1477 | } |
1463 | 1478 | ||
1464 | for (i = adev->num_ip_blocks - 1; i >= 0; i--) { | 1479 | for (i = adev->num_ip_blocks - 1; i >= 0; i--) { |
1465 | if (!adev->ip_block_status[i].hw) | 1480 | if (!adev->ip_blocks[i].status.hw) |
1466 | continue; | 1481 | continue; |
1467 | if (adev->ip_blocks[i].type == AMD_IP_BLOCK_TYPE_GMC) { | 1482 | if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) { |
1468 | amdgpu_wb_fini(adev); | 1483 | amdgpu_wb_fini(adev); |
1469 | amdgpu_vram_scratch_fini(adev); | 1484 | amdgpu_vram_scratch_fini(adev); |
1470 | } | 1485 | } |
1471 | /* ungate blocks before hw fini so that we can shutdown the blocks safely */ | 1486 | /* ungate blocks before hw fini so that we can shutdown the blocks safely */ |
1472 | r = adev->ip_blocks[i].funcs->set_clockgating_state((void *)adev, | 1487 | r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev, |
1473 | AMD_CG_STATE_UNGATE); | 1488 | AMD_CG_STATE_UNGATE); |
1474 | if (r) { | 1489 | if (r) { |
1475 | DRM_ERROR("set_clockgating_state(ungate) of IP block <%s> failed %d\n", adev->ip_blocks[i].funcs->name, r); | 1490 | DRM_ERROR("set_clockgating_state(ungate) of IP block <%s> failed %d\n", |
1491 | adev->ip_blocks[i].version->funcs->name, r); | ||
1476 | return r; | 1492 | return r; |
1477 | } | 1493 | } |
1478 | r = adev->ip_blocks[i].funcs->hw_fini((void *)adev); | 1494 | r = adev->ip_blocks[i].version->funcs->hw_fini((void *)adev); |
1479 | /* XXX handle errors */ | 1495 | /* XXX handle errors */ |
1480 | if (r) { | 1496 | if (r) { |
1481 | DRM_DEBUG("hw_fini of IP block <%s> failed %d\n", adev->ip_blocks[i].funcs->name, r); | 1497 | DRM_DEBUG("hw_fini of IP block <%s> failed %d\n", |
1498 | adev->ip_blocks[i].version->funcs->name, r); | ||
1482 | } | 1499 | } |
1483 | adev->ip_block_status[i].hw = false; | 1500 | adev->ip_blocks[i].status.hw = false; |
1484 | } | 1501 | } |
1485 | 1502 | ||
1486 | for (i = adev->num_ip_blocks - 1; i >= 0; i--) { | 1503 | for (i = adev->num_ip_blocks - 1; i >= 0; i--) { |
1487 | if (!adev->ip_block_status[i].sw) | 1504 | if (!adev->ip_blocks[i].status.sw) |
1488 | continue; | 1505 | continue; |
1489 | r = adev->ip_blocks[i].funcs->sw_fini((void *)adev); | 1506 | r = adev->ip_blocks[i].version->funcs->sw_fini((void *)adev); |
1490 | /* XXX handle errors */ | 1507 | /* XXX handle errors */ |
1491 | if (r) { | 1508 | if (r) { |
1492 | DRM_DEBUG("sw_fini of IP block <%s> failed %d\n", adev->ip_blocks[i].funcs->name, r); | 1509 | DRM_DEBUG("sw_fini of IP block <%s> failed %d\n", |
1510 | adev->ip_blocks[i].version->funcs->name, r); | ||
1493 | } | 1511 | } |
1494 | adev->ip_block_status[i].sw = false; | 1512 | adev->ip_blocks[i].status.sw = false; |
1495 | adev->ip_block_status[i].valid = false; | 1513 | adev->ip_blocks[i].status.valid = false; |
1496 | } | 1514 | } |
1497 | 1515 | ||
1498 | for (i = adev->num_ip_blocks - 1; i >= 0; i--) { | 1516 | for (i = adev->num_ip_blocks - 1; i >= 0; i--) { |
1499 | if (!adev->ip_block_status[i].late_initialized) | 1517 | if (!adev->ip_blocks[i].status.late_initialized) |
1500 | continue; | 1518 | continue; |
1501 | if (adev->ip_blocks[i].funcs->late_fini) | 1519 | if (adev->ip_blocks[i].version->funcs->late_fini) |
1502 | adev->ip_blocks[i].funcs->late_fini((void *)adev); | 1520 | adev->ip_blocks[i].version->funcs->late_fini((void *)adev); |
1503 | adev->ip_block_status[i].late_initialized = false; | 1521 | adev->ip_blocks[i].status.late_initialized = false; |
1504 | } | 1522 | } |
1505 | 1523 | ||
1506 | return 0; | 1524 | return 0; |
@@ -1518,21 +1536,23 @@ static int amdgpu_suspend(struct amdgpu_device *adev) | |||
1518 | } | 1536 | } |
1519 | 1537 | ||
1520 | for (i = adev->num_ip_blocks - 1; i >= 0; i--) { | 1538 | for (i = adev->num_ip_blocks - 1; i >= 0; i--) { |
1521 | if (!adev->ip_block_status[i].valid) | 1539 | if (!adev->ip_blocks[i].status.valid) |
1522 | continue; | 1540 | continue; |
1523 | /* ungate blocks so that suspend can properly shut them down */ | 1541 | /* ungate blocks so that suspend can properly shut them down */ |
1524 | if (i != AMD_IP_BLOCK_TYPE_SMC) { | 1542 | if (i != AMD_IP_BLOCK_TYPE_SMC) { |
1525 | r = adev->ip_blocks[i].funcs->set_clockgating_state((void *)adev, | 1543 | r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev, |
1526 | AMD_CG_STATE_UNGATE); | 1544 | AMD_CG_STATE_UNGATE); |
1527 | if (r) { | 1545 | if (r) { |
1528 | DRM_ERROR("set_clockgating_state(ungate) of IP block <%s> failed %d\n", adev->ip_blocks[i].funcs->name, r); | 1546 | DRM_ERROR("set_clockgating_state(ungate) of IP block <%s> failed %d\n", |
1547 | adev->ip_blocks[i].version->funcs->name, r); | ||
1529 | } | 1548 | } |
1530 | } | 1549 | } |
1531 | /* XXX handle errors */ | 1550 | /* XXX handle errors */ |
1532 | r = adev->ip_blocks[i].funcs->suspend(adev); | 1551 | r = adev->ip_blocks[i].version->funcs->suspend(adev); |
1533 | /* XXX handle errors */ | 1552 | /* XXX handle errors */ |
1534 | if (r) { | 1553 | if (r) { |
1535 | DRM_ERROR("suspend of IP block <%s> failed %d\n", adev->ip_blocks[i].funcs->name, r); | 1554 | DRM_ERROR("suspend of IP block <%s> failed %d\n", |
1555 | adev->ip_blocks[i].version->funcs->name, r); | ||
1536 | } | 1556 | } |
1537 | } | 1557 | } |
1538 | 1558 | ||
@@ -1544,11 +1564,12 @@ static int amdgpu_resume(struct amdgpu_device *adev) | |||
1544 | int i, r; | 1564 | int i, r; |
1545 | 1565 | ||
1546 | for (i = 0; i < adev->num_ip_blocks; i++) { | 1566 | for (i = 0; i < adev->num_ip_blocks; i++) { |
1547 | if (!adev->ip_block_status[i].valid) | 1567 | if (!adev->ip_blocks[i].status.valid) |
1548 | continue; | 1568 | continue; |
1549 | r = adev->ip_blocks[i].funcs->resume(adev); | 1569 | r = adev->ip_blocks[i].version->funcs->resume(adev); |
1550 | if (r) { | 1570 | if (r) { |
1551 | DRM_ERROR("resume of IP block <%s> failed %d\n", adev->ip_blocks[i].funcs->name, r); | 1571 | DRM_ERROR("resume of IP block <%s> failed %d\n", |
1572 | adev->ip_blocks[i].version->funcs->name, r); | ||
1552 | return r; | 1573 | return r; |
1553 | } | 1574 | } |
1554 | } | 1575 | } |
@@ -1859,8 +1880,6 @@ void amdgpu_device_fini(struct amdgpu_device *adev) | |||
1859 | amdgpu_fence_driver_fini(adev); | 1880 | amdgpu_fence_driver_fini(adev); |
1860 | amdgpu_fbdev_fini(adev); | 1881 | amdgpu_fbdev_fini(adev); |
1861 | r = amdgpu_fini(adev); | 1882 | r = amdgpu_fini(adev); |
1862 | kfree(adev->ip_block_status); | ||
1863 | adev->ip_block_status = NULL; | ||
1864 | adev->accel_working = false; | 1883 | adev->accel_working = false; |
1865 | /* free i2c buses */ | 1884 | /* free i2c buses */ |
1866 | amdgpu_i2c_fini(adev); | 1885 | amdgpu_i2c_fini(adev); |
@@ -1956,7 +1975,10 @@ int amdgpu_device_suspend(struct drm_device *dev, bool suspend, bool fbcon) | |||
1956 | 1975 | ||
1957 | r = amdgpu_suspend(adev); | 1976 | r = amdgpu_suspend(adev); |
1958 | 1977 | ||
1959 | /* evict remaining vram memory */ | 1978 | /* evict remaining vram memory |
1979 | * This second call to evict vram is to evict the gart page table | ||
1980 | * using the CPU. | ||
1981 | */ | ||
1960 | amdgpu_bo_evict_vram(adev); | 1982 | amdgpu_bo_evict_vram(adev); |
1961 | 1983 | ||
1962 | pci_save_state(dev->pdev); | 1984 | pci_save_state(dev->pdev); |
@@ -2096,13 +2118,13 @@ static bool amdgpu_check_soft_reset(struct amdgpu_device *adev) | |||
2096 | bool asic_hang = false; | 2118 | bool asic_hang = false; |
2097 | 2119 | ||
2098 | for (i = 0; i < adev->num_ip_blocks; i++) { | 2120 | for (i = 0; i < adev->num_ip_blocks; i++) { |
2099 | if (!adev->ip_block_status[i].valid) | 2121 | if (!adev->ip_blocks[i].status.valid) |
2100 | continue; | 2122 | continue; |
2101 | if (adev->ip_blocks[i].funcs->check_soft_reset) | 2123 | if (adev->ip_blocks[i].version->funcs->check_soft_reset) |
2102 | adev->ip_block_status[i].hang = | 2124 | adev->ip_blocks[i].status.hang = |
2103 | adev->ip_blocks[i].funcs->check_soft_reset(adev); | 2125 | adev->ip_blocks[i].version->funcs->check_soft_reset(adev); |
2104 | if (adev->ip_block_status[i].hang) { | 2126 | if (adev->ip_blocks[i].status.hang) { |
2105 | DRM_INFO("IP block:%d is hang!\n", i); | 2127 | DRM_INFO("IP block:%s is hung!\n", adev->ip_blocks[i].version->funcs->name); |
2106 | asic_hang = true; | 2128 | asic_hang = true; |
2107 | } | 2129 | } |
2108 | } | 2130 | } |
@@ -2114,11 +2136,11 @@ static int amdgpu_pre_soft_reset(struct amdgpu_device *adev) | |||
2114 | int i, r = 0; | 2136 | int i, r = 0; |
2115 | 2137 | ||
2116 | for (i = 0; i < adev->num_ip_blocks; i++) { | 2138 | for (i = 0; i < adev->num_ip_blocks; i++) { |
2117 | if (!adev->ip_block_status[i].valid) | 2139 | if (!adev->ip_blocks[i].status.valid) |
2118 | continue; | 2140 | continue; |
2119 | if (adev->ip_block_status[i].hang && | 2141 | if (adev->ip_blocks[i].status.hang && |
2120 | adev->ip_blocks[i].funcs->pre_soft_reset) { | 2142 | adev->ip_blocks[i].version->funcs->pre_soft_reset) { |
2121 | r = adev->ip_blocks[i].funcs->pre_soft_reset(adev); | 2143 | r = adev->ip_blocks[i].version->funcs->pre_soft_reset(adev); |
2122 | if (r) | 2144 | if (r) |
2123 | return r; | 2145 | return r; |
2124 | } | 2146 | } |
@@ -2132,13 +2154,13 @@ static bool amdgpu_need_full_reset(struct amdgpu_device *adev) | |||
2132 | int i; | 2154 | int i; |
2133 | 2155 | ||
2134 | for (i = 0; i < adev->num_ip_blocks; i++) { | 2156 | for (i = 0; i < adev->num_ip_blocks; i++) { |
2135 | if (!adev->ip_block_status[i].valid) | 2157 | if (!adev->ip_blocks[i].status.valid) |
2136 | continue; | 2158 | continue; |
2137 | if ((adev->ip_blocks[i].type == AMD_IP_BLOCK_TYPE_GMC) || | 2159 | if ((adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) || |
2138 | (adev->ip_blocks[i].type == AMD_IP_BLOCK_TYPE_SMC) || | 2160 | (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) || |
2139 | (adev->ip_blocks[i].type == AMD_IP_BLOCK_TYPE_ACP) || | 2161 | (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_ACP) || |
2140 | (adev->ip_blocks[i].type == AMD_IP_BLOCK_TYPE_DCE)) { | 2162 | (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE)) { |
2141 | if (adev->ip_block_status[i].hang) { | 2163 | if (adev->ip_blocks[i].status.hang) { |
2142 | DRM_INFO("Some block need full reset!\n"); | 2164 | DRM_INFO("Some block need full reset!\n"); |
2143 | return true; | 2165 | return true; |
2144 | } | 2166 | } |
@@ -2152,11 +2174,11 @@ static int amdgpu_soft_reset(struct amdgpu_device *adev) | |||
2152 | int i, r = 0; | 2174 | int i, r = 0; |
2153 | 2175 | ||
2154 | for (i = 0; i < adev->num_ip_blocks; i++) { | 2176 | for (i = 0; i < adev->num_ip_blocks; i++) { |
2155 | if (!adev->ip_block_status[i].valid) | 2177 | if (!adev->ip_blocks[i].status.valid) |
2156 | continue; | 2178 | continue; |
2157 | if (adev->ip_block_status[i].hang && | 2179 | if (adev->ip_blocks[i].status.hang && |
2158 | adev->ip_blocks[i].funcs->soft_reset) { | 2180 | adev->ip_blocks[i].version->funcs->soft_reset) { |
2159 | r = adev->ip_blocks[i].funcs->soft_reset(adev); | 2181 | r = adev->ip_blocks[i].version->funcs->soft_reset(adev); |
2160 | if (r) | 2182 | if (r) |
2161 | return r; | 2183 | return r; |
2162 | } | 2184 | } |
@@ -2170,11 +2192,11 @@ static int amdgpu_post_soft_reset(struct amdgpu_device *adev) | |||
2170 | int i, r = 0; | 2192 | int i, r = 0; |
2171 | 2193 | ||
2172 | for (i = 0; i < adev->num_ip_blocks; i++) { | 2194 | for (i = 0; i < adev->num_ip_blocks; i++) { |
2173 | if (!adev->ip_block_status[i].valid) | 2195 | if (!adev->ip_blocks[i].status.valid) |
2174 | continue; | 2196 | continue; |
2175 | if (adev->ip_block_status[i].hang && | 2197 | if (adev->ip_blocks[i].status.hang && |
2176 | adev->ip_blocks[i].funcs->post_soft_reset) | 2198 | adev->ip_blocks[i].version->funcs->post_soft_reset) |
2177 | r = adev->ip_blocks[i].funcs->post_soft_reset(adev); | 2199 | r = adev->ip_blocks[i].version->funcs->post_soft_reset(adev); |
2178 | if (r) | 2200 | if (r) |
2179 | return r; | 2201 | return r; |
2180 | } | 2202 | } |
@@ -2531,6 +2553,13 @@ static ssize_t amdgpu_debugfs_regs_read(struct file *f, char __user *buf, | |||
2531 | se_bank = (*pos >> 24) & 0x3FF; | 2553 | se_bank = (*pos >> 24) & 0x3FF; |
2532 | sh_bank = (*pos >> 34) & 0x3FF; | 2554 | sh_bank = (*pos >> 34) & 0x3FF; |
2533 | instance_bank = (*pos >> 44) & 0x3FF; | 2555 | instance_bank = (*pos >> 44) & 0x3FF; |
2556 | |||
2557 | if (se_bank == 0x3FF) | ||
2558 | se_bank = 0xFFFFFFFF; | ||
2559 | if (sh_bank == 0x3FF) | ||
2560 | sh_bank = 0xFFFFFFFF; | ||
2561 | if (instance_bank == 0x3FF) | ||
2562 | instance_bank = 0xFFFFFFFF; | ||
2534 | use_bank = 1; | 2563 | use_bank = 1; |
2535 | } else { | 2564 | } else { |
2536 | use_bank = 0; | 2565 | use_bank = 0; |
@@ -2539,8 +2568,8 @@ static ssize_t amdgpu_debugfs_regs_read(struct file *f, char __user *buf, | |||
2539 | *pos &= 0x3FFFF; | 2568 | *pos &= 0x3FFFF; |
2540 | 2569 | ||
2541 | if (use_bank) { | 2570 | if (use_bank) { |
2542 | if (sh_bank >= adev->gfx.config.max_sh_per_se || | 2571 | if ((sh_bank != 0xFFFFFFFF && sh_bank >= adev->gfx.config.max_sh_per_se) || |
2543 | se_bank >= adev->gfx.config.max_shader_engines) | 2572 | (se_bank != 0xFFFFFFFF && se_bank >= adev->gfx.config.max_shader_engines)) |
2544 | return -EINVAL; | 2573 | return -EINVAL; |
2545 | mutex_lock(&adev->grbm_idx_mutex); | 2574 | mutex_lock(&adev->grbm_idx_mutex); |
2546 | amdgpu_gfx_select_se_sh(adev, se_bank, | 2575 | amdgpu_gfx_select_se_sh(adev, se_bank, |
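Editor's note: for reference, the bank-select encoding these hunks decode from the debugfs file offset: bit 62 enables banking, bits 24/34/44 carry the SE/SH/instance selects (0x3FF meaning broadcast, which the kernel maps to 0xFFFFFFFF), and the low 18 bits remain the register byte offset. A hedged userspace sketch — the helper name and the register number are illustrative, not part of the patch, and a 64-bit off_t is assumed:

	#include <fcntl.h>
	#include <stdint.h>
	#include <stdio.h>
	#include <unistd.h>

	/* Compose a banked offset for /sys/kernel/debug/dri/N/amdgpu_regs.
	 * 0x3FF in a bank field requests broadcast across that dimension. */
	static off_t amdgpu_regs_offset(uint32_t reg, uint32_t se, uint32_t sh,
					uint32_t instance)
	{
		return ((off_t)1 << 62) |		/* enable bank selection */
		       ((off_t)(instance & 0x3FF) << 44) |
		       ((off_t)(sh & 0x3FF) << 34) |
		       ((off_t)(se & 0x3FF) << 24) |
		       (reg & 0x3FFFF);			/* register byte offset */
	}

	int main(void)
	{
		uint32_t value;
		int fd = open("/sys/kernel/debug/dri/0/amdgpu_regs", O_RDONLY);

		if (fd < 0)
			return 1;
		/* read one (illustrative) register from SE0/SH0, all instances */
		if (pread(fd, &value, sizeof(value),
			  amdgpu_regs_offset(0x2000, 0, 0, 0x3FF)) == sizeof(value))
			printf("reg = 0x%08x\n", value);
		close(fd);
		return 0;
	}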
@@ -2587,10 +2616,45 @@ static ssize_t amdgpu_debugfs_regs_write(struct file *f, const char __user *buf, | |||
2587 | struct amdgpu_device *adev = f->f_inode->i_private; | 2616 | struct amdgpu_device *adev = f->f_inode->i_private; |
2588 | ssize_t result = 0; | 2617 | ssize_t result = 0; |
2589 | int r; | 2618 | int r; |
2619 | bool pm_pg_lock, use_bank; | ||
2620 | unsigned instance_bank, sh_bank, se_bank; | ||
2590 | 2621 | ||
2591 | if (size & 0x3 || *pos & 0x3) | 2622 | if (size & 0x3 || *pos & 0x3) |
2592 | return -EINVAL; | 2623 | return -EINVAL; |
2593 | 2624 | ||
2625 | /* are we writing registers for which a PG lock is necessary? */ | ||
2626 | pm_pg_lock = (*pos >> 23) & 1; | ||
2627 | |||
2628 | if (*pos & (1ULL << 62)) { | ||
2629 | se_bank = (*pos >> 24) & 0x3FF; | ||
2630 | sh_bank = (*pos >> 34) & 0x3FF; | ||
2631 | instance_bank = (*pos >> 44) & 0x3FF; | ||
2632 | |||
2633 | if (se_bank == 0x3FF) | ||
2634 | se_bank = 0xFFFFFFFF; | ||
2635 | if (sh_bank == 0x3FF) | ||
2636 | sh_bank = 0xFFFFFFFF; | ||
2637 | if (instance_bank == 0x3FF) | ||
2638 | instance_bank = 0xFFFFFFFF; | ||
2639 | use_bank = 1; | ||
2640 | } else { | ||
2641 | use_bank = 0; | ||
2642 | } | ||
2643 | |||
2644 | *pos &= 0x3FFFF; | ||
2645 | |||
2646 | if (use_bank) { | ||
2647 | if ((sh_bank != 0xFFFFFFFF && sh_bank >= adev->gfx.config.max_sh_per_se) || | ||
2648 | (se_bank != 0xFFFFFFFF && se_bank >= adev->gfx.config.max_shader_engines)) | ||
2649 | return -EINVAL; | ||
2650 | mutex_lock(&adev->grbm_idx_mutex); | ||
2651 | amdgpu_gfx_select_se_sh(adev, se_bank, | ||
2652 | sh_bank, instance_bank); | ||
2653 | } | ||
2654 | |||
2655 | if (pm_pg_lock) | ||
2656 | mutex_lock(&adev->pm.mutex); | ||
2657 | |||
2594 | while (size) { | 2658 | while (size) { |
2595 | uint32_t value; | 2659 | uint32_t value; |
2596 | 2660 | ||
@@ -2609,6 +2673,14 @@ static ssize_t amdgpu_debugfs_regs_write(struct file *f, const char __user *buf, | |||
2609 | size -= 4; | 2673 | size -= 4; |
2610 | } | 2674 | } |
2611 | 2675 | ||
2676 | if (use_bank) { | ||
2677 | amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff); | ||
2678 | mutex_unlock(&adev->grbm_idx_mutex); | ||
2679 | } | ||
2680 | |||
2681 | if (pm_pg_lock) | ||
2682 | mutex_unlock(&adev->pm.mutex); | ||
2683 | |||
2612 | return result; | 2684 | return result; |
2613 | } | 2685 | } |
2614 | 2686 | ||
@@ -2871,6 +2943,56 @@ static ssize_t amdgpu_debugfs_sensor_read(struct file *f, char __user *buf, | |||
2871 | return !r ? 4 : r; | 2943 | return !r ? 4 : r; |
2872 | } | 2944 | } |
2873 | 2945 | ||
2946 | static ssize_t amdgpu_debugfs_wave_read(struct file *f, char __user *buf, | ||
2947 | size_t size, loff_t *pos) | ||
2948 | { | ||
2949 | struct amdgpu_device *adev = f->f_inode->i_private; | ||
2950 | int r, x; | ||
2951 | ssize_t result = 0; | ||
2952 | uint32_t offset, se, sh, cu, wave, simd, data[32]; | ||
2953 | |||
2954 | if (size & 3 || *pos & 3) | ||
2955 | return -EINVAL; | ||
2956 | |||
2957 | /* decode offset */ | ||
2958 | offset = (*pos & 0x7F); | ||
2959 | se = ((*pos >> 7) & 0xFF); | ||
2960 | sh = ((*pos >> 15) & 0xFF); | ||
2961 | cu = ((*pos >> 23) & 0xFF); | ||
2962 | wave = ((*pos >> 31) & 0xFF); | ||
2963 | simd = ((*pos >> 37) & 0xFF); | ||
2964 | |||
2965 | /* switch to the specific se/sh/cu */ | ||
2966 | mutex_lock(&adev->grbm_idx_mutex); | ||
2967 | amdgpu_gfx_select_se_sh(adev, se, sh, cu); | ||
2968 | |||
2969 | x = 0; | ||
2970 | if (adev->gfx.funcs->read_wave_data) | ||
2971 | adev->gfx.funcs->read_wave_data(adev, simd, wave, data, &x); | ||
2972 | |||
2973 | amdgpu_gfx_select_se_sh(adev, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF); | ||
2974 | mutex_unlock(&adev->grbm_idx_mutex); | ||
2975 | |||
2976 | if (!x) | ||
2977 | return -EINVAL; | ||
2978 | |||
2979 | while (size && (offset < x * 4)) { | ||
2980 | uint32_t value; | ||
2981 | |||
2982 | value = data[offset >> 2]; | ||
2983 | r = put_user(value, (uint32_t *)buf); | ||
2984 | if (r) | ||
2985 | return r; | ||
2986 | |||
2987 | result += 4; | ||
2988 | buf += 4; | ||
2989 | offset += 4; | ||
2990 | size -= 4; | ||
2991 | } | ||
2992 | |||
2993 | return result; | ||
2994 | } | ||
2995 | |||
2874 | static const struct file_operations amdgpu_debugfs_regs_fops = { | 2996 | static const struct file_operations amdgpu_debugfs_regs_fops = { |
2875 | .owner = THIS_MODULE, | 2997 | .owner = THIS_MODULE, |
2876 | .read = amdgpu_debugfs_regs_read, | 2998 | .read = amdgpu_debugfs_regs_read, |
@@ -2908,6 +3030,12 @@ static const struct file_operations amdgpu_debugfs_sensors_fops = { | |||
2908 | .llseek = default_llseek | 3030 | .llseek = default_llseek |
2909 | }; | 3031 | }; |
2910 | 3032 | ||
3033 | static const struct file_operations amdgpu_debugfs_wave_fops = { | ||
3034 | .owner = THIS_MODULE, | ||
3035 | .read = amdgpu_debugfs_wave_read, | ||
3036 | .llseek = default_llseek | ||
3037 | }; | ||
3038 | |||
2911 | static const struct file_operations *debugfs_regs[] = { | 3039 | static const struct file_operations *debugfs_regs[] = { |
2912 | &amdgpu_debugfs_regs_fops, | 3040 | &amdgpu_debugfs_regs_fops, |
2913 | &amdgpu_debugfs_regs_didt_fops, | 3041 | &amdgpu_debugfs_regs_didt_fops, |
@@ -2915,6 +3043,7 @@ static const struct file_operations *debugfs_regs[] = { | |||
2915 | &amdgpu_debugfs_regs_smc_fops, | 3043 | &amdgpu_debugfs_regs_smc_fops, |
2916 | &amdgpu_debugfs_gca_config_fops, | 3044 | &amdgpu_debugfs_gca_config_fops, |
2917 | &amdgpu_debugfs_sensors_fops, | 3045 | &amdgpu_debugfs_sensors_fops, |
3046 | &amdgpu_debugfs_wave_fops, | ||
2918 | }; | 3047 | }; |
2919 | 3048 | ||
2920 | static const char *debugfs_regs_names[] = { | 3049 | static const char *debugfs_regs_names[] = { |
@@ -2924,6 +3053,7 @@ static const char *debugfs_regs_names[] = { | |||
2924 | "amdgpu_regs_smc", | 3053 | "amdgpu_regs_smc", |
2925 | "amdgpu_gca_config", | 3054 | "amdgpu_gca_config", |
2926 | "amdgpu_sensors", | 3055 | "amdgpu_sensors", |
3056 | "amdgpu_wave", | ||
2927 | }; | 3057 | }; |
2928 | 3058 | ||
2929 | static int amdgpu_debugfs_regs_init(struct amdgpu_device *adev) | 3059 | static int amdgpu_debugfs_regs_init(struct amdgpu_device *adev) |
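Editor's note: the new amdgpu_wave file packs its selectors into the file offset as decoded above — the byte offset into the dump in bits 0-6, SE/SH/CU at bits 7/15/23, wave at bit 31 and SIMD at bit 37. As written in this revision, the 8-bit wave field (bits 31-38) and the simd field (bit 37 up) overlap by two bits, so callers should keep wave values small. A hedged sketch of composing such an offset; the helper name is illustrative and a 64-bit off_t is assumed:

	static off_t amdgpu_wave_offset(uint32_t se, uint32_t sh, uint32_t cu,
					uint32_t simd, uint32_t wave,
					uint32_t dword)
	{
		return ((off_t)(simd & 0xFF) << 37) |
		       ((off_t)(wave & 0xFF) << 31) |
		       ((off_t)(cu & 0xFF) << 23) |
		       ((off_t)(sh & 0xFF) << 15) |
		       ((off_t)(se & 0xFF) << 7) |
		       ((dword << 2) & 0x7F);	/* byte offset into the dump */
	}

The read itself returns up to 32 dwords gathered by the ASIC's read_wave_data hook, so dword can range over 0..31.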
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c index 083e2b429872..c7bc2b3c1b97 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c | |||
@@ -68,9 +68,9 @@ static void amdgpu_flip_work_func(struct work_struct *__work) | |||
68 | struct amdgpu_flip_work *work = | 68 | struct amdgpu_flip_work *work = |
69 | container_of(delayed_work, struct amdgpu_flip_work, flip_work); | 69 | container_of(delayed_work, struct amdgpu_flip_work, flip_work); |
70 | struct amdgpu_device *adev = work->adev; | 70 | struct amdgpu_device *adev = work->adev; |
71 | struct amdgpu_crtc *amdgpuCrtc = adev->mode_info.crtcs[work->crtc_id]; | 71 | struct amdgpu_crtc *amdgpu_crtc = adev->mode_info.crtcs[work->crtc_id]; |
72 | 72 | ||
73 | struct drm_crtc *crtc = &amdgpuCrtc->base; | 73 | struct drm_crtc *crtc = &amdgpu_crtc->base; |
74 | unsigned long flags; | 74 | unsigned long flags; |
75 | unsigned i; | 75 | unsigned i; |
76 | int vpos, hpos; | 76 | int vpos, hpos; |
@@ -85,14 +85,14 @@ static void amdgpu_flip_work_func(struct work_struct *__work) | |||
85 | /* Wait until we're out of the vertical blank period before the one | 85 | /* Wait until we're out of the vertical blank period before the one |
86 | * targeted by the flip | 86 | * targeted by the flip |
87 | */ | 87 | */ |
88 | if (amdgpuCrtc->enabled && | 88 | if (amdgpu_crtc->enabled && |
89 | (amdgpu_get_crtc_scanoutpos(adev->ddev, work->crtc_id, 0, | 89 | (amdgpu_get_crtc_scanoutpos(adev->ddev, work->crtc_id, 0, |
90 | &vpos, &hpos, NULL, NULL, | 90 | &vpos, &hpos, NULL, NULL, |
91 | &crtc->hwmode) | 91 | &crtc->hwmode) |
92 | & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) == | 92 | & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) == |
93 | (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) && | 93 | (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) && |
94 | (int)(work->target_vblank - | 94 | (int)(work->target_vblank - |
95 | amdgpu_get_vblank_counter_kms(adev->ddev, amdgpuCrtc->crtc_id)) > 0) { | 95 | amdgpu_get_vblank_counter_kms(adev->ddev, amdgpu_crtc->crtc_id)) > 0) { |
96 | schedule_delayed_work(&work->flip_work, usecs_to_jiffies(1000)); | 96 | schedule_delayed_work(&work->flip_work, usecs_to_jiffies(1000)); |
97 | return; | 97 | return; |
98 | } | 98 | } |
@@ -104,12 +104,12 @@ static void amdgpu_flip_work_func(struct work_struct *__work) | |||
104 | adev->mode_info.funcs->page_flip(adev, work->crtc_id, work->base, work->async); | 104 | adev->mode_info.funcs->page_flip(adev, work->crtc_id, work->base, work->async); |
105 | 105 | ||
106 | /* Set the flip status */ | 106 | /* Set the flip status */ |
107 | amdgpuCrtc->pflip_status = AMDGPU_FLIP_SUBMITTED; | 107 | amdgpu_crtc->pflip_status = AMDGPU_FLIP_SUBMITTED; |
108 | spin_unlock_irqrestore(&crtc->dev->event_lock, flags); | 108 | spin_unlock_irqrestore(&crtc->dev->event_lock, flags); |
109 | 109 | ||
110 | 110 | ||
111 | DRM_DEBUG_DRIVER("crtc:%d[%p], pflip_stat:AMDGPU_FLIP_SUBMITTED, work: %p,\n", | 111 | DRM_DEBUG_DRIVER("crtc:%d[%p], pflip_stat:AMDGPU_FLIP_SUBMITTED, work: %p,\n", |
112 | amdgpuCrtc->crtc_id, amdgpuCrtc, work); | 112 | amdgpu_crtc->crtc_id, amdgpu_crtc, work); |
113 | 113 | ||
114 | } | 114 | } |
115 | 115 | ||
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c index 14f57d9915e3..6ca0333ca4c0 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c | |||
@@ -553,9 +553,10 @@ int amdgpu_parse_extended_power_table(struct amdgpu_device *adev) | |||
553 | entry = (ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record *) | 553 | entry = (ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record *) |
554 | ((u8 *)entry + sizeof(ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record)); | 554 | ((u8 *)entry + sizeof(ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record)); |
555 | } | 555 | } |
556 | for (i = 0; i < states->numEntries; i++) { | 556 | adev->pm.dpm.num_of_vce_states = |
557 | if (i >= AMDGPU_MAX_VCE_LEVELS) | 557 | states->numEntries > AMD_MAX_VCE_LEVELS ? |
558 | break; | 558 | AMD_MAX_VCE_LEVELS : states->numEntries; |
559 | for (i = 0; i < adev->pm.dpm.num_of_vce_states; i++) { | ||
559 | vce_clk = (VCEClockInfo *) | 560 | vce_clk = (VCEClockInfo *) |
560 | ((u8 *)&array->entries[0] + | 561 | ((u8 *)&array->entries[0] + |
561 | (state_entry->ucVCEClockInfoIndex * sizeof(VCEClockInfo))); | 562 | (state_entry->ucVCEClockInfoIndex * sizeof(VCEClockInfo))); |
@@ -955,3 +956,12 @@ u8 amdgpu_encode_pci_lane_width(u32 lanes) | |||
955 | 956 | ||
956 | return encoded_lanes[lanes]; | 957 | return encoded_lanes[lanes]; |
957 | } | 958 | } |
959 | |||
960 | struct amd_vce_state* | ||
961 | amdgpu_get_vce_clock_state(struct amdgpu_device *adev, unsigned idx) | ||
962 | { | ||
963 | if (idx < adev->pm.dpm.num_of_vce_states) | ||
964 | return &adev->pm.dpm.vce_states[idx]; | ||
965 | |||
966 | return NULL; | ||
967 | } | ||
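Editor's note: amdgpu_get_vce_clock_state() is a bounds-checked accessor over the table the parser above now clamps to AMD_MAX_VCE_LEVELS; it returns NULL once idx passes num_of_vce_states, so callers can simply iterate until it does. A sketch of that pattern (it mirrors the AMDGPU_INFO_VCE_CLOCK_TABLE hunk in amdgpu_kms.c later in this series; the loop itself is illustrative):

	unsigned i;
	struct amd_vce_state *state;

	for (i = 0; (state = amdgpu_get_vce_clock_state(adev, i)); i++)
		DRM_INFO("VCE state %u: evclk=%u sclk=%u mclk=%u\n",
			 i, state->evclk, state->sclk, state->mclk);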
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h index 3738a96c2619..bd85e35998e7 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h | |||
@@ -23,6 +23,446 @@ | |||
23 | #ifndef __AMDGPU_DPM_H__ | 23 | #ifndef __AMDGPU_DPM_H__ |
24 | #define __AMDGPU_DPM_H__ | 24 | #define __AMDGPU_DPM_H__ |
25 | 25 | ||
26 | enum amdgpu_int_thermal_type { | ||
27 | THERMAL_TYPE_NONE, | ||
28 | THERMAL_TYPE_EXTERNAL, | ||
29 | THERMAL_TYPE_EXTERNAL_GPIO, | ||
30 | THERMAL_TYPE_RV6XX, | ||
31 | THERMAL_TYPE_RV770, | ||
32 | THERMAL_TYPE_ADT7473_WITH_INTERNAL, | ||
33 | THERMAL_TYPE_EVERGREEN, | ||
34 | THERMAL_TYPE_SUMO, | ||
35 | THERMAL_TYPE_NI, | ||
36 | THERMAL_TYPE_SI, | ||
37 | THERMAL_TYPE_EMC2103_WITH_INTERNAL, | ||
38 | THERMAL_TYPE_CI, | ||
39 | THERMAL_TYPE_KV, | ||
40 | }; | ||
41 | |||
42 | enum amdgpu_dpm_auto_throttle_src { | ||
43 | AMDGPU_DPM_AUTO_THROTTLE_SRC_THERMAL, | ||
44 | AMDGPU_DPM_AUTO_THROTTLE_SRC_EXTERNAL | ||
45 | }; | ||
46 | |||
47 | enum amdgpu_dpm_event_src { | ||
48 | AMDGPU_DPM_EVENT_SRC_ANALOG = 0, | ||
49 | AMDGPU_DPM_EVENT_SRC_EXTERNAL = 1, | ||
50 | AMDGPU_DPM_EVENT_SRC_DIGITAL = 2, | ||
51 | AMDGPU_DPM_EVENT_SRC_ANALOG_OR_EXTERNAL = 3, | ||
52 | AMDGPU_DPM_EVENT_SRC_DIGIAL_OR_EXTERNAL = 4 | ||
53 | }; | ||
54 | |||
55 | struct amdgpu_ps { | ||
56 | u32 caps; /* vbios flags */ | ||
57 | u32 class; /* vbios flags */ | ||
58 | u32 class2; /* vbios flags */ | ||
59 | /* UVD clocks */ | ||
60 | u32 vclk; | ||
61 | u32 dclk; | ||
62 | /* VCE clocks */ | ||
63 | u32 evclk; | ||
64 | u32 ecclk; | ||
65 | bool vce_active; | ||
66 | enum amd_vce_level vce_level; | ||
67 | /* asic priv */ | ||
68 | void *ps_priv; | ||
69 | }; | ||
70 | |||
71 | struct amdgpu_dpm_thermal { | ||
72 | /* thermal interrupt work */ | ||
73 | struct work_struct work; | ||
74 | /* low temperature threshold */ | ||
75 | int min_temp; | ||
76 | /* high temperature threshold */ | ||
77 | int max_temp; | ||
78 | /* was last interrupt low to high or high to low */ | ||
79 | bool high_to_low; | ||
80 | /* interrupt source */ | ||
81 | struct amdgpu_irq_src irq; | ||
82 | }; | ||
83 | |||
84 | enum amdgpu_clk_action | ||
85 | { | ||
86 | AMDGPU_SCLK_UP = 1, | ||
87 | AMDGPU_SCLK_DOWN | ||
88 | }; | ||
89 | |||
90 | struct amdgpu_blacklist_clocks | ||
91 | { | ||
92 | u32 sclk; | ||
93 | u32 mclk; | ||
94 | enum amdgpu_clk_action action; | ||
95 | }; | ||
96 | |||
97 | struct amdgpu_clock_and_voltage_limits { | ||
98 | u32 sclk; | ||
99 | u32 mclk; | ||
100 | u16 vddc; | ||
101 | u16 vddci; | ||
102 | }; | ||
103 | |||
104 | struct amdgpu_clock_array { | ||
105 | u32 count; | ||
106 | u32 *values; | ||
107 | }; | ||
108 | |||
109 | struct amdgpu_clock_voltage_dependency_entry { | ||
110 | u32 clk; | ||
111 | u16 v; | ||
112 | }; | ||
113 | |||
114 | struct amdgpu_clock_voltage_dependency_table { | ||
115 | u32 count; | ||
116 | struct amdgpu_clock_voltage_dependency_entry *entries; | ||
117 | }; | ||
118 | |||
119 | union amdgpu_cac_leakage_entry { | ||
120 | struct { | ||
121 | u16 vddc; | ||
122 | u32 leakage; | ||
123 | }; | ||
124 | struct { | ||
125 | u16 vddc1; | ||
126 | u16 vddc2; | ||
127 | u16 vddc3; | ||
128 | }; | ||
129 | }; | ||
130 | |||
131 | struct amdgpu_cac_leakage_table { | ||
132 | u32 count; | ||
133 | union amdgpu_cac_leakage_entry *entries; | ||
134 | }; | ||
135 | |||
136 | struct amdgpu_phase_shedding_limits_entry { | ||
137 | u16 voltage; | ||
138 | u32 sclk; | ||
139 | u32 mclk; | ||
140 | }; | ||
141 | |||
142 | struct amdgpu_phase_shedding_limits_table { | ||
143 | u32 count; | ||
144 | struct amdgpu_phase_shedding_limits_entry *entries; | ||
145 | }; | ||
146 | |||
147 | struct amdgpu_uvd_clock_voltage_dependency_entry { | ||
148 | u32 vclk; | ||
149 | u32 dclk; | ||
150 | u16 v; | ||
151 | }; | ||
152 | |||
153 | struct amdgpu_uvd_clock_voltage_dependency_table { | ||
154 | u8 count; | ||
155 | struct amdgpu_uvd_clock_voltage_dependency_entry *entries; | ||
156 | }; | ||
157 | |||
158 | struct amdgpu_vce_clock_voltage_dependency_entry { | ||
159 | u32 ecclk; | ||
160 | u32 evclk; | ||
161 | u16 v; | ||
162 | }; | ||
163 | |||
164 | struct amdgpu_vce_clock_voltage_dependency_table { | ||
165 | u8 count; | ||
166 | struct amdgpu_vce_clock_voltage_dependency_entry *entries; | ||
167 | }; | ||
168 | |||
169 | struct amdgpu_ppm_table { | ||
170 | u8 ppm_design; | ||
171 | u16 cpu_core_number; | ||
172 | u32 platform_tdp; | ||
173 | u32 small_ac_platform_tdp; | ||
174 | u32 platform_tdc; | ||
175 | u32 small_ac_platform_tdc; | ||
176 | u32 apu_tdp; | ||
177 | u32 dgpu_tdp; | ||
178 | u32 dgpu_ulv_power; | ||
179 | u32 tj_max; | ||
180 | }; | ||
181 | |||
182 | struct amdgpu_cac_tdp_table { | ||
183 | u16 tdp; | ||
184 | u16 configurable_tdp; | ||
185 | u16 tdc; | ||
186 | u16 battery_power_limit; | ||
187 | u16 small_power_limit; | ||
188 | u16 low_cac_leakage; | ||
189 | u16 high_cac_leakage; | ||
190 | u16 maximum_power_delivery_limit; | ||
191 | }; | ||
192 | |||
193 | struct amdgpu_dpm_dynamic_state { | ||
194 | struct amdgpu_clock_voltage_dependency_table vddc_dependency_on_sclk; | ||
195 | struct amdgpu_clock_voltage_dependency_table vddci_dependency_on_mclk; | ||
196 | struct amdgpu_clock_voltage_dependency_table vddc_dependency_on_mclk; | ||
197 | struct amdgpu_clock_voltage_dependency_table mvdd_dependency_on_mclk; | ||
198 | struct amdgpu_clock_voltage_dependency_table vddc_dependency_on_dispclk; | ||
199 | struct amdgpu_uvd_clock_voltage_dependency_table uvd_clock_voltage_dependency_table; | ||
200 | struct amdgpu_vce_clock_voltage_dependency_table vce_clock_voltage_dependency_table; | ||
201 | struct amdgpu_clock_voltage_dependency_table samu_clock_voltage_dependency_table; | ||
202 | struct amdgpu_clock_voltage_dependency_table acp_clock_voltage_dependency_table; | ||
203 | struct amdgpu_clock_voltage_dependency_table vddgfx_dependency_on_sclk; | ||
204 | struct amdgpu_clock_array valid_sclk_values; | ||
205 | struct amdgpu_clock_array valid_mclk_values; | ||
206 | struct amdgpu_clock_and_voltage_limits max_clock_voltage_on_dc; | ||
207 | struct amdgpu_clock_and_voltage_limits max_clock_voltage_on_ac; | ||
208 | u32 mclk_sclk_ratio; | ||
209 | u32 sclk_mclk_delta; | ||
210 | u16 vddc_vddci_delta; | ||
211 | u16 min_vddc_for_pcie_gen2; | ||
212 | struct amdgpu_cac_leakage_table cac_leakage_table; | ||
213 | struct amdgpu_phase_shedding_limits_table phase_shedding_limits_table; | ||
214 | struct amdgpu_ppm_table *ppm_table; | ||
215 | struct amdgpu_cac_tdp_table *cac_tdp_table; | ||
216 | }; | ||
217 | |||
218 | struct amdgpu_dpm_fan { | ||
219 | u16 t_min; | ||
220 | u16 t_med; | ||
221 | u16 t_high; | ||
222 | u16 pwm_min; | ||
223 | u16 pwm_med; | ||
224 | u16 pwm_high; | ||
225 | u8 t_hyst; | ||
226 | u32 cycle_delay; | ||
227 | u16 t_max; | ||
228 | u8 control_mode; | ||
229 | u16 default_max_fan_pwm; | ||
230 | u16 default_fan_output_sensitivity; | ||
231 | u16 fan_output_sensitivity; | ||
232 | bool ucode_fan_control; | ||
233 | }; | ||
234 | |||
235 | enum amdgpu_pcie_gen { | ||
236 | AMDGPU_PCIE_GEN1 = 0, | ||
237 | AMDGPU_PCIE_GEN2 = 1, | ||
238 | AMDGPU_PCIE_GEN3 = 2, | ||
239 | AMDGPU_PCIE_GEN_INVALID = 0xffff | ||
240 | }; | ||
241 | |||
242 | enum amdgpu_dpm_forced_level { | ||
243 | AMDGPU_DPM_FORCED_LEVEL_AUTO = 0, | ||
244 | AMDGPU_DPM_FORCED_LEVEL_LOW = 1, | ||
245 | AMDGPU_DPM_FORCED_LEVEL_HIGH = 2, | ||
246 | AMDGPU_DPM_FORCED_LEVEL_MANUAL = 3, | ||
247 | }; | ||
248 | |||
249 | struct amdgpu_dpm_funcs { | ||
250 | int (*get_temperature)(struct amdgpu_device *adev); | ||
251 | int (*pre_set_power_state)(struct amdgpu_device *adev); | ||
252 | int (*set_power_state)(struct amdgpu_device *adev); | ||
253 | void (*post_set_power_state)(struct amdgpu_device *adev); | ||
254 | void (*display_configuration_changed)(struct amdgpu_device *adev); | ||
255 | u32 (*get_sclk)(struct amdgpu_device *adev, bool low); | ||
256 | u32 (*get_mclk)(struct amdgpu_device *adev, bool low); | ||
257 | void (*print_power_state)(struct amdgpu_device *adev, struct amdgpu_ps *ps); | ||
258 | void (*debugfs_print_current_performance_level)(struct amdgpu_device *adev, struct seq_file *m); | ||
259 | int (*force_performance_level)(struct amdgpu_device *adev, enum amdgpu_dpm_forced_level level); | ||
260 | bool (*vblank_too_short)(struct amdgpu_device *adev); | ||
261 | void (*powergate_uvd)(struct amdgpu_device *adev, bool gate); | ||
262 | void (*powergate_vce)(struct amdgpu_device *adev, bool gate); | ||
263 | void (*enable_bapm)(struct amdgpu_device *adev, bool enable); | ||
264 | void (*set_fan_control_mode)(struct amdgpu_device *adev, u32 mode); | ||
265 | u32 (*get_fan_control_mode)(struct amdgpu_device *adev); | ||
266 | int (*set_fan_speed_percent)(struct amdgpu_device *adev, u32 speed); | ||
267 | int (*get_fan_speed_percent)(struct amdgpu_device *adev, u32 *speed); | ||
268 | int (*force_clock_level)(struct amdgpu_device *adev, enum pp_clock_type type, uint32_t mask); | ||
269 | int (*print_clock_levels)(struct amdgpu_device *adev, enum pp_clock_type type, char *buf); | ||
270 | int (*get_sclk_od)(struct amdgpu_device *adev); | ||
271 | int (*set_sclk_od)(struct amdgpu_device *adev, uint32_t value); | ||
272 | int (*get_mclk_od)(struct amdgpu_device *adev); | ||
273 | int (*set_mclk_od)(struct amdgpu_device *adev, uint32_t value); | ||
274 | int (*check_state_equal)(struct amdgpu_device *adev, | ||
275 | struct amdgpu_ps *cps, | ||
276 | struct amdgpu_ps *rps, | ||
277 | bool *equal); | ||
278 | |||
279 | struct amd_vce_state* (*get_vce_clock_state)(struct amdgpu_device *adev, unsigned idx); | ||
280 | }; | ||
281 | |||
282 | #define amdgpu_dpm_pre_set_power_state(adev) (adev)->pm.funcs->pre_set_power_state((adev)) | ||
283 | #define amdgpu_dpm_set_power_state(adev) (adev)->pm.funcs->set_power_state((adev)) | ||
284 | #define amdgpu_dpm_post_set_power_state(adev) (adev)->pm.funcs->post_set_power_state((adev)) | ||
285 | #define amdgpu_dpm_display_configuration_changed(adev) (adev)->pm.funcs->display_configuration_changed((adev)) | ||
286 | #define amdgpu_dpm_print_power_state(adev, ps) (adev)->pm.funcs->print_power_state((adev), (ps)) | ||
287 | #define amdgpu_dpm_vblank_too_short(adev) (adev)->pm.funcs->vblank_too_short((adev)) | ||
288 | #define amdgpu_dpm_enable_bapm(adev, e) (adev)->pm.funcs->enable_bapm((adev), (e)) | ||
289 | |||
290 | #define amdgpu_dpm_read_sensor(adev, idx, value) \ | ||
291 | ((adev)->pp_enabled ? \ | ||
292 | (adev)->powerplay.pp_funcs->read_sensor(adev->powerplay.pp_handle, (idx), (value)) : \ | ||
293 | -EINVAL) | ||
294 | |||
295 | #define amdgpu_dpm_get_temperature(adev) \ | ||
296 | ((adev)->pp_enabled ? \ | ||
297 | (adev)->powerplay.pp_funcs->get_temperature((adev)->powerplay.pp_handle) : \ | ||
298 | (adev)->pm.funcs->get_temperature((adev))) | ||
299 | |||
300 | #define amdgpu_dpm_set_fan_control_mode(adev, m) \ | ||
301 | ((adev)->pp_enabled ? \ | ||
302 | (adev)->powerplay.pp_funcs->set_fan_control_mode((adev)->powerplay.pp_handle, (m)) : \ | ||
303 | (adev)->pm.funcs->set_fan_control_mode((adev), (m))) | ||
304 | |||
305 | #define amdgpu_dpm_get_fan_control_mode(adev) \ | ||
306 | ((adev)->pp_enabled ? \ | ||
307 | (adev)->powerplay.pp_funcs->get_fan_control_mode((adev)->powerplay.pp_handle) : \ | ||
308 | (adev)->pm.funcs->get_fan_control_mode((adev))) | ||
309 | |||
310 | #define amdgpu_dpm_set_fan_speed_percent(adev, s) \ | ||
311 | ((adev)->pp_enabled ? \ | ||
312 | (adev)->powerplay.pp_funcs->set_fan_speed_percent((adev)->powerplay.pp_handle, (s)) : \ | ||
313 | (adev)->pm.funcs->set_fan_speed_percent((adev), (s))) | ||
314 | |||
315 | #define amdgpu_dpm_get_fan_speed_percent(adev, s) \ | ||
316 | ((adev)->pp_enabled ? \ | ||
317 | (adev)->powerplay.pp_funcs->get_fan_speed_percent((adev)->powerplay.pp_handle, (s)) : \ | ||
318 | (adev)->pm.funcs->get_fan_speed_percent((adev), (s))) | ||
319 | |||
320 | #define amdgpu_dpm_get_sclk(adev, l) \ | ||
321 | ((adev)->pp_enabled ? \ | ||
322 | (adev)->powerplay.pp_funcs->get_sclk((adev)->powerplay.pp_handle, (l)) : \ | ||
323 | (adev)->pm.funcs->get_sclk((adev), (l))) | ||
324 | |||
325 | #define amdgpu_dpm_get_mclk(adev, l) \ | ||
326 | ((adev)->pp_enabled ? \ | ||
327 | (adev)->powerplay.pp_funcs->get_mclk((adev)->powerplay.pp_handle, (l)) : \ | ||
328 | (adev)->pm.funcs->get_mclk((adev), (l))) | ||
329 | |||
330 | |||
331 | #define amdgpu_dpm_force_performance_level(adev, l) \ | ||
332 | ((adev)->pp_enabled ? \ | ||
333 | (adev)->powerplay.pp_funcs->force_performance_level((adev)->powerplay.pp_handle, (l)) : \ | ||
334 | (adev)->pm.funcs->force_performance_level((adev), (l))) | ||
335 | |||
336 | #define amdgpu_dpm_powergate_uvd(adev, g) \ | ||
337 | ((adev)->pp_enabled ? \ | ||
338 | (adev)->powerplay.pp_funcs->powergate_uvd((adev)->powerplay.pp_handle, (g)) : \ | ||
339 | (adev)->pm.funcs->powergate_uvd((adev), (g))) | ||
340 | |||
341 | #define amdgpu_dpm_powergate_vce(adev, g) \ | ||
342 | ((adev)->pp_enabled ? \ | ||
343 | (adev)->powerplay.pp_funcs->powergate_vce((adev)->powerplay.pp_handle, (g)) : \ | ||
344 | (adev)->pm.funcs->powergate_vce((adev), (g))) | ||
345 | |||
346 | #define amdgpu_dpm_get_current_power_state(adev) \ | ||
347 | (adev)->powerplay.pp_funcs->get_current_power_state((adev)->powerplay.pp_handle) | ||
348 | |||
349 | #define amdgpu_dpm_get_performance_level(adev) \ | ||
350 | (adev)->powerplay.pp_funcs->get_performance_level((adev)->powerplay.pp_handle) | ||
351 | |||
352 | #define amdgpu_dpm_get_pp_num_states(adev, data) \ | ||
353 | (adev)->powerplay.pp_funcs->get_pp_num_states((adev)->powerplay.pp_handle, data) | ||
354 | |||
355 | #define amdgpu_dpm_get_pp_table(adev, table) \ | ||
356 | (adev)->powerplay.pp_funcs->get_pp_table((adev)->powerplay.pp_handle, table) | ||
357 | |||
358 | #define amdgpu_dpm_set_pp_table(adev, buf, size) \ | ||
359 | (adev)->powerplay.pp_funcs->set_pp_table((adev)->powerplay.pp_handle, buf, size) | ||
360 | |||
361 | #define amdgpu_dpm_print_clock_levels(adev, type, buf) \ | ||
362 | (adev)->powerplay.pp_funcs->print_clock_levels((adev)->powerplay.pp_handle, type, buf) | ||
363 | |||
364 | #define amdgpu_dpm_force_clock_level(adev, type, level) \ | ||
365 | (adev)->powerplay.pp_funcs->force_clock_level((adev)->powerplay.pp_handle, type, level) | ||
366 | |||
367 | #define amdgpu_dpm_get_sclk_od(adev) \ | ||
368 | (adev)->powerplay.pp_funcs->get_sclk_od((adev)->powerplay.pp_handle) | ||
369 | |||
370 | #define amdgpu_dpm_set_sclk_od(adev, value) \ | ||
371 | (adev)->powerplay.pp_funcs->set_sclk_od((adev)->powerplay.pp_handle, value) | ||
372 | |||
373 | #define amdgpu_dpm_get_mclk_od(adev) \ | ||
374 | ((adev)->powerplay.pp_funcs->get_mclk_od((adev)->powerplay.pp_handle)) | ||
375 | |||
376 | #define amdgpu_dpm_set_mclk_od(adev, value) \ | ||
377 | ((adev)->powerplay.pp_funcs->set_mclk_od((adev)->powerplay.pp_handle, value)) | ||
378 | |||
379 | #define amdgpu_dpm_dispatch_task(adev, event_id, input, output) \ | ||
380 | (adev)->powerplay.pp_funcs->dispatch_tasks((adev)->powerplay.pp_handle, (event_id), (input), (output)) | ||
381 | |||
382 | #define amgdpu_dpm_check_state_equal(adev, cps, rps, equal) (adev)->pm.funcs->check_state_equal((adev), (cps),(rps),(equal)) | ||
383 | |||
384 | #define amdgpu_dpm_get_vce_clock_state(adev, i) \ | ||
385 | ((adev)->pp_enabled ? \ | ||
386 | (adev)->powerplay.pp_funcs->get_vce_clock_state((adev)->powerplay.pp_handle, (i)) : \ | ||
387 | (adev)->pm.funcs->get_vce_clock_state((adev), (i))) | ||
388 | |||
389 | struct amdgpu_dpm { | ||
390 | struct amdgpu_ps *ps; | ||
391 | /* number of valid power states */ | ||
392 | int num_ps; | ||
393 | /* current power state that is active */ | ||
394 | struct amdgpu_ps *current_ps; | ||
395 | /* requested power state */ | ||
396 | struct amdgpu_ps *requested_ps; | ||
397 | /* boot up power state */ | ||
398 | struct amdgpu_ps *boot_ps; | ||
399 | /* default uvd power state */ | ||
400 | struct amdgpu_ps *uvd_ps; | ||
401 | /* vce requirements */ | ||
402 | u32 num_of_vce_states; | ||
403 | struct amd_vce_state vce_states[AMD_MAX_VCE_LEVELS]; | ||
404 | enum amd_vce_level vce_level; | ||
405 | enum amd_pm_state_type state; | ||
406 | enum amd_pm_state_type user_state; | ||
407 | enum amd_pm_state_type last_state; | ||
408 | enum amd_pm_state_type last_user_state; | ||
409 | u32 platform_caps; | ||
410 | u32 voltage_response_time; | ||
411 | u32 backbias_response_time; | ||
412 | void *priv; | ||
413 | u32 new_active_crtcs; | ||
414 | int new_active_crtc_count; | ||
415 | u32 current_active_crtcs; | ||
416 | int current_active_crtc_count; | ||
417 | struct amdgpu_dpm_dynamic_state dyn_state; | ||
418 | struct amdgpu_dpm_fan fan; | ||
419 | u32 tdp_limit; | ||
420 | u32 near_tdp_limit; | ||
421 | u32 near_tdp_limit_adjusted; | ||
422 | u32 sq_ramping_threshold; | ||
423 | u32 cac_leakage; | ||
424 | u16 tdp_od_limit; | ||
425 | u32 tdp_adjustment; | ||
426 | u16 load_line_slope; | ||
427 | bool power_control; | ||
428 | bool ac_power; | ||
429 | /* special states active */ | ||
430 | bool thermal_active; | ||
431 | bool uvd_active; | ||
432 | bool vce_active; | ||
433 | /* thermal handling */ | ||
434 | struct amdgpu_dpm_thermal thermal; | ||
435 | /* forced levels */ | ||
436 | enum amdgpu_dpm_forced_level forced_level; | ||
437 | }; | ||
438 | |||
439 | struct amdgpu_pm { | ||
440 | struct mutex mutex; | ||
441 | u32 current_sclk; | ||
442 | u32 current_mclk; | ||
443 | u32 default_sclk; | ||
444 | u32 default_mclk; | ||
445 | struct amdgpu_i2c_chan *i2c_bus; | ||
446 | /* internal thermal controller on rv6xx+ */ | ||
447 | enum amdgpu_int_thermal_type int_thermal_type; | ||
448 | struct device *int_hwmon_dev; | ||
449 | /* fan control parameters */ | ||
450 | bool no_fan; | ||
451 | u8 fan_pulses_per_revolution; | ||
452 | u8 fan_min_rpm; | ||
453 | u8 fan_max_rpm; | ||
454 | /* dpm */ | ||
455 | bool dpm_enabled; | ||
456 | bool sysfs_initialized; | ||
457 | struct amdgpu_dpm dpm; | ||
458 | const struct firmware *fw; /* SMC firmware */ | ||
459 | uint32_t fw_version; | ||
460 | const struct amdgpu_dpm_funcs *funcs; | ||
461 | uint32_t pcie_gen_mask; | ||
462 | uint32_t pcie_mlw_mask; | ||
463 | struct amd_pp_display_configuration pm_display_cfg;/* set by DAL */ | ||
464 | }; | ||
465 | |||
26 | #define R600_SSTU_DFLT 0 | 466 | #define R600_SSTU_DFLT 0 |
27 | #define R600_SST_DFLT 0x00C8 | 467 | #define R600_SST_DFLT 0x00C8 |
28 | 468 | ||
@@ -82,4 +522,7 @@ u16 amdgpu_get_pcie_lane_support(struct amdgpu_device *adev, | |||
82 | u16 default_lanes); | 522 | u16 default_lanes); |
83 | u8 amdgpu_encode_pci_lane_width(u32 lanes); | 523 | u8 amdgpu_encode_pci_lane_width(u32 lanes); |
84 | 524 | ||
525 | struct amd_vce_state* | ||
526 | amdgpu_get_vce_clock_state(struct amdgpu_device *adev, unsigned idx); | ||
527 | |||
85 | #endif | 528 | #endif |
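Editor's note: the accessor macros moved into this header follow one dispatch shape — route through the powerplay layer when pp_enabled, otherwise fall back to the legacy per-ASIC amdgpu_dpm_funcs table (the pure-powerplay macros near the bottom assume pp is active). Roughly, for a caller:

	int temp = amdgpu_dpm_get_temperature(adev);

	/* expands to approximately:
	 *
	 *	adev->pp_enabled ?
	 *	  adev->powerplay.pp_funcs->get_temperature(
	 *			adev->powerplay.pp_handle) :
	 *	  adev->pm.funcs->get_temperature(adev);
	 */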
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c index 71ed27eb3dde..6bb4d9e9afe4 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c | |||
@@ -58,9 +58,10 @@ | |||
58 | * - 3.6.0 - kmd involves use CONTEXT_CONTROL in ring buffer. | 58 | * - 3.6.0 - kmd involves use CONTEXT_CONTROL in ring buffer. |
59 | * - 3.7.0 - Add support for VCE clock list packet | 59 | * - 3.7.0 - Add support for VCE clock list packet |
60 | * - 3.8.0 - Add support raster config init in the kernel | 60 | * - 3.8.0 - Add support raster config init in the kernel |
61 | * - 3.9.0 - Add support for memory query info about VRAM and GTT. | ||
61 | */ | 62 | */ |
62 | #define KMS_DRIVER_MAJOR 3 | 63 | #define KMS_DRIVER_MAJOR 3 |
63 | #define KMS_DRIVER_MINOR 8 | 64 | #define KMS_DRIVER_MINOR 9 |
64 | #define KMS_DRIVER_PATCHLEVEL 0 | 65 | #define KMS_DRIVER_PATCHLEVEL 0 |
65 | 66 | ||
66 | int amdgpu_vram_limit = 0; | 67 | int amdgpu_vram_limit = 0; |
@@ -85,6 +86,7 @@ int amdgpu_vm_size = 64; | |||
85 | int amdgpu_vm_block_size = -1; | 86 | int amdgpu_vm_block_size = -1; |
86 | int amdgpu_vm_fault_stop = 0; | 87 | int amdgpu_vm_fault_stop = 0; |
87 | int amdgpu_vm_debug = 0; | 88 | int amdgpu_vm_debug = 0; |
89 | int amdgpu_vram_page_split = 1024; | ||
88 | int amdgpu_exp_hw_support = 0; | 90 | int amdgpu_exp_hw_support = 0; |
89 | int amdgpu_sched_jobs = 32; | 91 | int amdgpu_sched_jobs = 32; |
90 | int amdgpu_sched_hw_submission = 2; | 92 | int amdgpu_sched_hw_submission = 2; |
@@ -165,6 +167,9 @@ module_param_named(vm_fault_stop, amdgpu_vm_fault_stop, int, 0444); | |||
165 | MODULE_PARM_DESC(vm_debug, "Debug VM handling (0 = disabled (default), 1 = enabled)"); | 167 | MODULE_PARM_DESC(vm_debug, "Debug VM handling (0 = disabled (default), 1 = enabled)"); |
166 | module_param_named(vm_debug, amdgpu_vm_debug, int, 0644); | 168 | module_param_named(vm_debug, amdgpu_vm_debug, int, 0644); |
167 | 169 | ||
170 | MODULE_PARM_DESC(vram_page_split, "Number of pages after which VRAM allocations are split (default 1024, -1 = disable)"); | ||
171 | module_param_named(vram_page_split, amdgpu_vram_page_split, int, 0444); | ||
172 | |||
168 | MODULE_PARM_DESC(exp_hw_support, "experimental hw support (1 = enable, 0 = disable (default))"); | 173 | MODULE_PARM_DESC(exp_hw_support, "experimental hw support (1 = enable, 0 = disable (default))"); |
169 | module_param_named(exp_hw_support, amdgpu_exp_hw_support, int, 0444); | 174 | module_param_named(exp_hw_support, amdgpu_exp_hw_support, int, 0444); |
170 | 175 | ||
@@ -201,7 +206,8 @@ module_param_named(pg_mask, amdgpu_pg_mask, uint, 0444); | |||
201 | MODULE_PARM_DESC(disable_cu, "Disable CUs (se.sh.cu,...)"); | 206 | MODULE_PARM_DESC(disable_cu, "Disable CUs (se.sh.cu,...)"); |
202 | module_param_named(disable_cu, amdgpu_disable_cu, charp, 0444); | 207 | module_param_named(disable_cu, amdgpu_disable_cu, charp, 0444); |
203 | 208 | ||
204 | MODULE_PARM_DESC(virtual_display, "Enable virtual display feature (the virtual_display will be set like xxxx:xx:xx.x;xxxx:xx:xx.x)"); | 209 | MODULE_PARM_DESC(virtual_display, |
210 | "Enable virtual display feature (the virtual_display will be set like xxxx:xx:xx.x,x;xxxx:xx:xx.x,x)"); | ||
205 | module_param_named(virtual_display, amdgpu_virtual_display, charp, 0444); | 211 | module_param_named(virtual_display, amdgpu_virtual_display, charp, 0444); |
206 | 212 | ||
207 | static const struct pci_device_id pciidlist[] = { | 213 | static const struct pci_device_id pciidlist[] = { |
@@ -381,6 +387,7 @@ static const struct pci_device_id pciidlist[] = { | |||
381 | {0x1002, 0x6939, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TONGA}, | 387 | {0x1002, 0x6939, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TONGA}, |
382 | /* fiji */ | 388 | /* fiji */ |
383 | {0x1002, 0x7300, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_FIJI}, | 389 | {0x1002, 0x7300, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_FIJI}, |
390 | {0x1002, 0x730F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_FIJI}, | ||
384 | /* carrizo */ | 391 | /* carrizo */ |
385 | {0x1002, 0x9870, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CARRIZO|AMD_IS_APU}, | 392 | {0x1002, 0x9870, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CARRIZO|AMD_IS_APU}, |
386 | {0x1002, 0x9874, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CARRIZO|AMD_IS_APU}, | 393 | {0x1002, 0x9874, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CARRIZO|AMD_IS_APU}, |
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c index 8d01aa24d68a..38bdc2d300a3 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c | |||
@@ -152,7 +152,8 @@ static int amdgpufb_create_pinned_object(struct amdgpu_fbdev *rfbdev, | |||
152 | aligned_size = ALIGN(size, PAGE_SIZE); | 152 | aligned_size = ALIGN(size, PAGE_SIZE); |
153 | ret = amdgpu_gem_object_create(adev, aligned_size, 0, | 153 | ret = amdgpu_gem_object_create(adev, aligned_size, 0, |
154 | AMDGPU_GEM_DOMAIN_VRAM, | 154 | AMDGPU_GEM_DOMAIN_VRAM, |
155 | AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED, | 155 | AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED | |
156 | AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS, | ||
156 | true, &gobj); | 157 | true, &gobj); |
157 | if (ret) { | 158 | if (ret) { |
158 | printk(KERN_ERR "failed to allocate framebuffer (%d)\n", | 159 | printk(KERN_ERR "failed to allocate framebuffer (%d)\n", |
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c index 21a1242fc13b..964d2a946ed5 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c | |||
@@ -126,7 +126,8 @@ int amdgpu_gart_table_vram_alloc(struct amdgpu_device *adev) | |||
126 | if (adev->gart.robj == NULL) { | 126 | if (adev->gart.robj == NULL) { |
127 | r = amdgpu_bo_create(adev, adev->gart.table_size, | 127 | r = amdgpu_bo_create(adev, adev->gart.table_size, |
128 | PAGE_SIZE, true, AMDGPU_GEM_DOMAIN_VRAM, | 128 | PAGE_SIZE, true, AMDGPU_GEM_DOMAIN_VRAM, |
129 | AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED, | 129 | AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED | |
130 | AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS, | ||
130 | NULL, NULL, &adev->gart.robj); | 131 | NULL, NULL, &adev->gart.robj); |
131 | if (r) { | 132 | if (r) { |
132 | return r; | 133 | return r; |
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c index 3ad0bf6ce3e4..cd62f6ffde2a 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c | |||
@@ -116,10 +116,11 @@ void amdgpu_gem_force_release(struct amdgpu_device *adev) | |||
116 | * Call from drm_gem_handle_create which appears in both new and open ioctl | 116 | * Call from drm_gem_handle_create which appears in both new and open ioctl |
117 | * case. | 117 | * case. |
118 | */ | 118 | */ |
119 | int amdgpu_gem_object_open(struct drm_gem_object *obj, struct drm_file *file_priv) | 119 | int amdgpu_gem_object_open(struct drm_gem_object *obj, |
120 | struct drm_file *file_priv) | ||
120 | { | 121 | { |
121 | struct amdgpu_bo *abo = gem_to_amdgpu_bo(obj); | 122 | struct amdgpu_bo *abo = gem_to_amdgpu_bo(obj); |
122 | struct amdgpu_device *adev = abo->adev; | 123 | struct amdgpu_device *adev = amdgpu_ttm_adev(abo->tbo.bdev); |
123 | struct amdgpu_fpriv *fpriv = file_priv->driver_priv; | 124 | struct amdgpu_fpriv *fpriv = file_priv->driver_priv; |
124 | struct amdgpu_vm *vm = &fpriv->vm; | 125 | struct amdgpu_vm *vm = &fpriv->vm; |
125 | struct amdgpu_bo_va *bo_va; | 126 | struct amdgpu_bo_va *bo_va; |
@@ -142,7 +143,7 @@ void amdgpu_gem_object_close(struct drm_gem_object *obj, | |||
142 | struct drm_file *file_priv) | 143 | struct drm_file *file_priv) |
143 | { | 144 | { |
144 | struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj); | 145 | struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj); |
145 | struct amdgpu_device *adev = bo->adev; | 146 | struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev); |
146 | struct amdgpu_fpriv *fpriv = file_priv->driver_priv; | 147 | struct amdgpu_fpriv *fpriv = file_priv->driver_priv; |
147 | struct amdgpu_vm *vm = &fpriv->vm; | 148 | struct amdgpu_vm *vm = &fpriv->vm; |
148 | 149 | ||
@@ -468,6 +469,16 @@ out: | |||
468 | return r; | 469 | return r; |
469 | } | 470 | } |
470 | 471 | ||
472 | static int amdgpu_gem_va_check(void *param, struct amdgpu_bo *bo) | ||
473 | { | ||
474 | unsigned domain = amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type); | ||
475 | |||
476 | /* if anything is swapped out don't swap it in here, | ||
477 | just abort and wait for the next CS */ | ||
478 | |||
479 | return domain == AMDGPU_GEM_DOMAIN_CPU ? -ERESTARTSYS : 0; | ||
480 | } | ||
481 | |||
471 | /** | 482 | /** |
472 | * amdgpu_gem_va_update_vm -update the bo_va in its VM | 483 | * amdgpu_gem_va_update_vm -update the bo_va in its VM |
473 | * | 484 | * |
@@ -478,7 +489,8 @@ out: | |||
478 | * vital here, so they are not reported back to userspace. | 489 | * vital here, so they are not reported back to userspace. |
479 | */ | 490 | */ |
480 | static void amdgpu_gem_va_update_vm(struct amdgpu_device *adev, | 491 | static void amdgpu_gem_va_update_vm(struct amdgpu_device *adev, |
481 | struct amdgpu_bo_va *bo_va, uint32_t operation) | 492 | struct amdgpu_bo_va *bo_va, |
493 | uint32_t operation) | ||
482 | { | 494 | { |
483 | struct ttm_validate_buffer tv, *entry; | 495 | struct ttm_validate_buffer tv, *entry; |
484 | struct amdgpu_bo_list_entry vm_pd; | 496 | struct amdgpu_bo_list_entry vm_pd; |
@@ -501,7 +513,6 @@ static void amdgpu_gem_va_update_vm(struct amdgpu_device *adev, | |||
501 | if (r) | 513 | if (r) |
502 | goto error_print; | 514 | goto error_print; |
503 | 515 | ||
504 | amdgpu_vm_get_pt_bos(adev, bo_va->vm, &duplicates); | ||
505 | list_for_each_entry(entry, &list, head) { | 516 | list_for_each_entry(entry, &list, head) { |
506 | domain = amdgpu_mem_type_to_domain(entry->bo->mem.mem_type); | 517 | domain = amdgpu_mem_type_to_domain(entry->bo->mem.mem_type); |
507 | /* if anything is swapped out don't swap it in here, | 518 | /* if anything is swapped out don't swap it in here, |
@@ -509,13 +520,10 @@ static void amdgpu_gem_va_update_vm(struct amdgpu_device *adev, | |||
509 | if (domain == AMDGPU_GEM_DOMAIN_CPU) | 520 | if (domain == AMDGPU_GEM_DOMAIN_CPU) |
510 | goto error_unreserve; | 521 | goto error_unreserve; |
511 | } | 522 | } |
512 | list_for_each_entry(entry, &duplicates, head) { | 523 | r = amdgpu_vm_validate_pt_bos(adev, bo_va->vm, amdgpu_gem_va_check, |
513 | domain = amdgpu_mem_type_to_domain(entry->bo->mem.mem_type); | 524 | NULL); |
514 | /* if anything is swapped out don't swap it in here, | 525 | if (r) |
515 | just abort and wait for the next CS */ | 526 | goto error_unreserve; |
516 | if (domain == AMDGPU_GEM_DOMAIN_CPU) | ||
517 | goto error_unreserve; | ||
518 | } | ||
519 | 527 | ||
520 | r = amdgpu_vm_update_page_directory(adev, bo_va->vm); | 528 | r = amdgpu_vm_update_page_directory(adev, bo_va->vm); |
521 | if (r) | 529 | if (r) |
@@ -536,8 +544,6 @@ error_print: | |||
536 | DRM_ERROR("Couldn't update BO_VA (%d)\n", r); | 544 | DRM_ERROR("Couldn't update BO_VA (%d)\n", r); |
537 | } | 545 | } |
538 | 546 | ||
539 | |||
540 | |||
541 | int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data, | 547 | int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data, |
542 | struct drm_file *filp) | 548 | struct drm_file *filp) |
543 | { | 549 | { |
@@ -547,7 +553,8 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data, | |||
547 | struct amdgpu_fpriv *fpriv = filp->driver_priv; | 553 | struct amdgpu_fpriv *fpriv = filp->driver_priv; |
548 | struct amdgpu_bo *abo; | 554 | struct amdgpu_bo *abo; |
549 | struct amdgpu_bo_va *bo_va; | 555 | struct amdgpu_bo_va *bo_va; |
550 | struct ttm_validate_buffer tv, tv_pd; | 556 | struct amdgpu_bo_list_entry vm_pd; |
557 | struct ttm_validate_buffer tv; | ||
551 | struct ww_acquire_ctx ticket; | 558 | struct ww_acquire_ctx ticket; |
552 | struct list_head list, duplicates; | 559 | struct list_head list, duplicates; |
553 | uint32_t invalid_flags, va_flags = 0; | 560 | uint32_t invalid_flags, va_flags = 0; |
@@ -592,9 +599,7 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data, | |||
592 | tv.shared = true; | 599 | tv.shared = true; |
593 | list_add(&tv.head, &list); | 600 | list_add(&tv.head, &list); |
594 | 601 | ||
595 | tv_pd.bo = &fpriv->vm.page_directory->tbo; | 602 | amdgpu_vm_get_pd_bo(&fpriv->vm, &list, &vm_pd); |
596 | tv_pd.shared = true; | ||
597 | list_add(&tv_pd.head, &list); | ||
598 | 603 | ||
599 | r = ttm_eu_reserve_buffers(&ticket, &list, true, &duplicates); | 604 | r = ttm_eu_reserve_buffers(&ticket, &list, true, &duplicates); |
600 | if (r) { | 605 | if (r) { |
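Editor's note: the open-coded duplicates walk removed above is replaced by amdgpu_vm_validate_pt_bos(), which runs a caller-supplied check over every page-table BO of the VM and stops on the first non-zero return; amdgpu_gem_va_check() uses that to bail out with -ERESTARTSYS when any page table was evicted to system memory, deferring the swap-in to the next CS. The callback shape as this diff uses it (param is the opaque last argument, NULL here):

	/* int (*callback)(void *param, struct amdgpu_bo *bo); a non-zero
	 * return aborts the walk and is propagated to the caller */
	r = amdgpu_vm_validate_pt_bos(adev, bo_va->vm,
				      amdgpu_gem_va_check, NULL);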
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c index a074edd95c70..01a42b6a69a4 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c | |||
@@ -24,6 +24,7 @@ | |||
24 | */ | 24 | */ |
25 | #include <drm/drmP.h> | 25 | #include <drm/drmP.h> |
26 | #include "amdgpu.h" | 26 | #include "amdgpu.h" |
27 | #include "amdgpu_gfx.h" | ||
27 | 28 | ||
28 | /* | 29 | /* |
29 | * GPU scratch registers helpers function. | 30 | * GPU scratch registers helpers function. |
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h index 51321e154c09..e02044086445 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h | |||
@@ -27,6 +27,7 @@ | |||
27 | int amdgpu_gfx_scratch_get(struct amdgpu_device *adev, uint32_t *reg); | 27 | int amdgpu_gfx_scratch_get(struct amdgpu_device *adev, uint32_t *reg); |
28 | void amdgpu_gfx_scratch_free(struct amdgpu_device *adev, uint32_t reg); | 28 | void amdgpu_gfx_scratch_free(struct amdgpu_device *adev, uint32_t reg); |
29 | 29 | ||
30 | unsigned amdgpu_gfx_parse_disable_cu(unsigned *mask, unsigned max_se, unsigned max_sh); | 30 | void amdgpu_gfx_parse_disable_cu(unsigned *mask, unsigned max_se, |
31 | unsigned max_sh); | ||
31 | 32 | ||
32 | #endif | 33 | #endif |
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c index f86c84427778..3c634f02a3d5 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c | |||
@@ -168,6 +168,7 @@ static int amdgpu_gtt_mgr_new(struct ttm_mem_type_manager *man, | |||
168 | return -ENOMEM; | 168 | return -ENOMEM; |
169 | 169 | ||
170 | node->start = AMDGPU_BO_INVALID_OFFSET; | 170 | node->start = AMDGPU_BO_INVALID_OFFSET; |
171 | node->size = mem->num_pages; | ||
171 | mem->mm_node = node; | 172 | mem->mm_node = node; |
172 | 173 | ||
173 | if (place->fpfn || place->lpfn || place->flags & TTM_PL_FLAG_TOPDOWN) { | 174 | if (place->fpfn || place->lpfn || place->flags & TTM_PL_FLAG_TOPDOWN) { |
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c index 6a6c86c9c169..16308eb22e7f 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c | |||
@@ -152,8 +152,8 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs, | |||
152 | return -EINVAL; | 152 | return -EINVAL; |
153 | } | 153 | } |
154 | 154 | ||
155 | alloc_size = amdgpu_ring_get_dma_frame_size(ring) + | 155 | alloc_size = ring->funcs->emit_frame_size + num_ibs * |
156 | num_ibs * amdgpu_ring_get_emit_ib_size(ring); | 156 | ring->funcs->emit_ib_size; |
157 | 157 | ||
158 | r = amdgpu_ring_alloc(ring, alloc_size); | 158 | r = amdgpu_ring_alloc(ring, alloc_size); |
159 | if (r) { | 159 | if (r) { |
@@ -161,7 +161,7 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs, | |||
161 | return r; | 161 | return r; |
162 | } | 162 | } |
163 | 163 | ||
164 | if (ring->type == AMDGPU_RING_TYPE_SDMA && ring->funcs->init_cond_exec) | 164 | if (ring->funcs->init_cond_exec) |
165 | patch_offset = amdgpu_ring_init_cond_exec(ring); | 165 | patch_offset = amdgpu_ring_init_cond_exec(ring); |
166 | 166 | ||
167 | if (vm) { | 167 | if (vm) { |
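Editor's note: IB submission sizing drops the per-ring getter calls in favor of two constants published directly in ring->funcs. For example (numbers illustrative, not from any specific ASIC), a ring advertising emit_frame_size = 256 and emit_ib_size = 8 would reserve 256 + 3 * 8 = 280 dwords for a three-IB job:

	unsigned alloc_size = ring->funcs->emit_frame_size +	   /* 256 */
			      num_ibs * ring->funcs->emit_ib_size; /* 3 * 8 */

	r = amdgpu_ring_alloc(ring, alloc_size);	/* 280 dwords */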
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c index c2c7fb140338..78392671046a 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c | |||
@@ -306,10 +306,10 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file | |||
306 | } | 306 | } |
307 | 307 | ||
308 | for (i = 0; i < adev->num_ip_blocks; i++) { | 308 | for (i = 0; i < adev->num_ip_blocks; i++) { |
309 | if (adev->ip_blocks[i].type == type && | 309 | if (adev->ip_blocks[i].version->type == type && |
310 | adev->ip_block_status[i].valid) { | 310 | adev->ip_blocks[i].status.valid) { |
311 | ip.hw_ip_version_major = adev->ip_blocks[i].major; | 311 | ip.hw_ip_version_major = adev->ip_blocks[i].version->major; |
312 | ip.hw_ip_version_minor = adev->ip_blocks[i].minor; | 312 | ip.hw_ip_version_minor = adev->ip_blocks[i].version->minor; |
313 | ip.capabilities_flags = 0; | 313 | ip.capabilities_flags = 0; |
314 | ip.available_rings = ring_mask; | 314 | ip.available_rings = ring_mask; |
315 | ip.ib_start_alignment = ib_start_alignment; | 315 | ip.ib_start_alignment = ib_start_alignment; |
@@ -345,8 +345,8 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file | |||
345 | } | 345 | } |
346 | 346 | ||
347 | for (i = 0; i < adev->num_ip_blocks; i++) | 347 | for (i = 0; i < adev->num_ip_blocks; i++) |
348 | if (adev->ip_blocks[i].type == type && | 348 | if (adev->ip_blocks[i].version->type == type && |
349 | adev->ip_block_status[i].valid && | 349 | adev->ip_blocks[i].status.valid && |
350 | count < AMDGPU_HW_IP_INSTANCE_MAX_COUNT) | 350 | count < AMDGPU_HW_IP_INSTANCE_MAX_COUNT) |
351 | count++; | 351 | count++; |
352 | 352 | ||
@@ -411,6 +411,36 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file | |||
411 | return copy_to_user(out, &vram_gtt, | 411 | return copy_to_user(out, &vram_gtt, |
412 | min((size_t)size, sizeof(vram_gtt))) ? -EFAULT : 0; | 412 | min((size_t)size, sizeof(vram_gtt))) ? -EFAULT : 0; |
413 | } | 413 | } |
414 | case AMDGPU_INFO_MEMORY: { | ||
415 | struct drm_amdgpu_memory_info mem; | ||
416 | |||
417 | memset(&mem, 0, sizeof(mem)); | ||
418 | mem.vram.total_heap_size = adev->mc.real_vram_size; | ||
419 | mem.vram.usable_heap_size = | ||
420 | adev->mc.real_vram_size - adev->vram_pin_size; | ||
421 | mem.vram.heap_usage = atomic64_read(&adev->vram_usage); | ||
422 | mem.vram.max_allocation = mem.vram.usable_heap_size * 3 / 4; | ||
423 | |||
424 | mem.cpu_accessible_vram.total_heap_size = | ||
425 | adev->mc.visible_vram_size; | ||
426 | mem.cpu_accessible_vram.usable_heap_size = | ||
427 | adev->mc.visible_vram_size - | ||
428 | (adev->vram_pin_size - adev->invisible_pin_size); | ||
429 | mem.cpu_accessible_vram.heap_usage = | ||
430 | atomic64_read(&adev->vram_vis_usage); | ||
431 | mem.cpu_accessible_vram.max_allocation = | ||
432 | mem.cpu_accessible_vram.usable_heap_size * 3 / 4; | ||
433 | |||
434 | mem.gtt.total_heap_size = adev->mc.gtt_size; | ||
435 | mem.gtt.usable_heap_size = | ||
436 | adev->mc.gtt_size - adev->gart_pin_size; | ||
437 | mem.gtt.heap_usage = atomic64_read(&adev->gtt_usage); | ||
438 | mem.gtt.max_allocation = mem.gtt.usable_heap_size * 3 / 4; | ||
439 | |||
440 | return copy_to_user(out, &mem, | ||
441 | min((size_t)size, sizeof(mem))) | ||
442 | ? -EFAULT : 0; | ||
443 | } | ||
414 | case AMDGPU_INFO_READ_MMR_REG: { | 444 | case AMDGPU_INFO_READ_MMR_REG: { |
415 | unsigned n, alloc_size; | 445 | unsigned n, alloc_size; |
416 | uint32_t *regs; | 446 | uint32_t *regs; |
@@ -475,6 +505,8 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file | |||
475 | dev_info.ids_flags = 0; | 505 | dev_info.ids_flags = 0; |
476 | if (adev->flags & AMD_IS_APU) | 506 | if (adev->flags & AMD_IS_APU) |
477 | dev_info.ids_flags |= AMDGPU_IDS_FLAGS_FUSION; | 507 | dev_info.ids_flags |= AMDGPU_IDS_FLAGS_FUSION; |
508 | if (amdgpu_sriov_vf(adev)) | ||
509 | dev_info.ids_flags |= AMDGPU_IDS_FLAGS_PREEMPTION; | ||
478 | dev_info.virtual_address_offset = AMDGPU_VA_RESERVED_SIZE; | 510 | dev_info.virtual_address_offset = AMDGPU_VA_RESERVED_SIZE; |
479 | dev_info.virtual_address_max = (uint64_t)adev->vm_manager.max_pfn * AMDGPU_GPU_PAGE_SIZE; | 511 | dev_info.virtual_address_max = (uint64_t)adev->vm_manager.max_pfn * AMDGPU_GPU_PAGE_SIZE; |
480 | dev_info.virtual_address_alignment = max((int)PAGE_SIZE, AMDGPU_GPU_PAGE_SIZE); | 512 | dev_info.virtual_address_alignment = max((int)PAGE_SIZE, AMDGPU_GPU_PAGE_SIZE); |
@@ -494,6 +526,24 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file | |||
494 | return copy_to_user(out, &dev_info, | 526 | return copy_to_user(out, &dev_info, |
495 | min((size_t)size, sizeof(dev_info))) ? -EFAULT : 0; | 527 | min((size_t)size, sizeof(dev_info))) ? -EFAULT : 0; |
496 | } | 528 | } |
529 | case AMDGPU_INFO_VCE_CLOCK_TABLE: { | ||
530 | unsigned i; | ||
531 | struct drm_amdgpu_info_vce_clock_table vce_clk_table = {}; | ||
532 | struct amd_vce_state *vce_state; | ||
533 | |||
534 | for (i = 0; i < AMDGPU_VCE_CLOCK_TABLE_ENTRIES; i++) { | ||
535 | vce_state = amdgpu_dpm_get_vce_clock_state(adev, i); | ||
536 | if (vce_state) { | ||
537 | vce_clk_table.entries[i].sclk = vce_state->sclk; | ||
538 | vce_clk_table.entries[i].mclk = vce_state->mclk; | ||
539 | vce_clk_table.entries[i].eclk = vce_state->evclk; | ||
540 | vce_clk_table.num_valid_entries++; | ||
541 | } | ||
542 | } | ||
543 | |||
544 | return copy_to_user(out, &vce_clk_table, | ||
545 | min((size_t)size, sizeof(vce_clk_table))) ? -EFAULT : 0; | ||
546 | } | ||
497 | default: | 547 | default: |
498 | DRM_DEBUG_KMS("Invalid request %d\n", info->query); | 548 | DRM_DEBUG_KMS("Invalid request %d\n", info->query); |
499 | return -EINVAL; | 549 | return -EINVAL; |
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c index 32fa7b7913f7..7ea3cacf9f9f 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c | |||
@@ -285,7 +285,7 @@ free_rmn: | |||
285 | int amdgpu_mn_register(struct amdgpu_bo *bo, unsigned long addr) | 285 | int amdgpu_mn_register(struct amdgpu_bo *bo, unsigned long addr) |
286 | { | 286 | { |
287 | unsigned long end = addr + amdgpu_bo_size(bo) - 1; | 287 | unsigned long end = addr + amdgpu_bo_size(bo) - 1; |
288 | struct amdgpu_device *adev = bo->adev; | 288 | struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev); |
289 | struct amdgpu_mn *rmn; | 289 | struct amdgpu_mn *rmn; |
290 | struct amdgpu_mn_node *node = NULL; | 290 | struct amdgpu_mn_node *node = NULL; |
291 | struct list_head bos; | 291 | struct list_head bos; |
@@ -340,7 +340,7 @@ int amdgpu_mn_register(struct amdgpu_bo *bo, unsigned long addr) | |||
340 | */ | 340 | */ |
341 | void amdgpu_mn_unregister(struct amdgpu_bo *bo) | 341 | void amdgpu_mn_unregister(struct amdgpu_bo *bo) |
342 | { | 342 | { |
343 | struct amdgpu_device *adev = bo->adev; | 343 | struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev); |
344 | struct amdgpu_mn *rmn; | 344 | struct amdgpu_mn *rmn; |
345 | struct list_head *head; | 345 | struct list_head *head; |
346 | 346 | ||
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h index 7b0eff7d060b..1e23334b07fb 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h | |||
@@ -341,8 +341,6 @@ struct amdgpu_mode_info { | |||
341 | int num_dig; /* number of dig blocks */ | 341 | int num_dig; /* number of dig blocks */ |
342 | int disp_priority; | 342 | int disp_priority; |
343 | const struct amdgpu_display_funcs *funcs; | 343 | const struct amdgpu_display_funcs *funcs; |
344 | struct hrtimer vblank_timer; | ||
345 | enum amdgpu_interrupt_state vsync_timer_enabled; | ||
346 | }; | 344 | }; |
347 | 345 | ||
348 | #define AMDGPU_MAX_BL_LEVEL 0xFF | 346 | #define AMDGPU_MAX_BL_LEVEL 0xFF |
@@ -413,6 +411,9 @@ struct amdgpu_crtc { | |||
413 | u32 wm_high; | 411 | u32 wm_high; |
414 | u32 lb_vblank_lead_lines; | 412 | u32 lb_vblank_lead_lines; |
415 | struct drm_display_mode hw_mode; | 413 | struct drm_display_mode hw_mode; |
414 | /* for virtual dce */ | ||
415 | struct hrtimer vblank_timer; | ||
416 | enum amdgpu_interrupt_state vsync_timer_enabled; | ||
416 | }; | 417 | }; |
417 | 418 | ||
418 | struct amdgpu_encoder_atom_dig { | 419 | struct amdgpu_encoder_atom_dig { |
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c index aa074fac0c7f..6efa8d73b394 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c | |||
@@ -88,18 +88,19 @@ static void amdgpu_update_memory_usage(struct amdgpu_device *adev, | |||
88 | 88 | ||
89 | static void amdgpu_ttm_bo_destroy(struct ttm_buffer_object *tbo) | 89 | static void amdgpu_ttm_bo_destroy(struct ttm_buffer_object *tbo) |
90 | { | 90 | { |
91 | struct amdgpu_device *adev = amdgpu_ttm_adev(tbo->bdev); | ||
91 | struct amdgpu_bo *bo; | 92 | struct amdgpu_bo *bo; |
92 | 93 | ||
93 | bo = container_of(tbo, struct amdgpu_bo, tbo); | 94 | bo = container_of(tbo, struct amdgpu_bo, tbo); |
94 | 95 | ||
95 | amdgpu_update_memory_usage(bo->adev, &bo->tbo.mem, NULL); | 96 | amdgpu_update_memory_usage(adev, &bo->tbo.mem, NULL); |
96 | 97 | ||
97 | drm_gem_object_release(&bo->gem_base); | 98 | drm_gem_object_release(&bo->gem_base); |
98 | amdgpu_bo_unref(&bo->parent); | 99 | amdgpu_bo_unref(&bo->parent); |
99 | if (!list_empty(&bo->shadow_list)) { | 100 | if (!list_empty(&bo->shadow_list)) { |
100 | mutex_lock(&bo->adev->shadow_list_lock); | 101 | mutex_lock(&adev->shadow_list_lock); |
101 | list_del_init(&bo->shadow_list); | 102 | list_del_init(&bo->shadow_list); |
102 | mutex_unlock(&bo->adev->shadow_list_lock); | 103 | mutex_unlock(&adev->shadow_list_lock); |
103 | } | 104 | } |
104 | kfree(bo->metadata); | 105 | kfree(bo->metadata); |
105 | kfree(bo); | 106 | kfree(bo); |
@@ -121,12 +122,17 @@ static void amdgpu_ttm_placement_init(struct amdgpu_device *adev, | |||
121 | 122 | ||
122 | if (domain & AMDGPU_GEM_DOMAIN_VRAM) { | 123 | if (domain & AMDGPU_GEM_DOMAIN_VRAM) { |
123 | unsigned visible_pfn = adev->mc.visible_vram_size >> PAGE_SHIFT; | 124 | unsigned visible_pfn = adev->mc.visible_vram_size >> PAGE_SHIFT; |
125 | unsigned lpfn = 0; | ||
126 | |||
127 | /* This forces a reallocation if the flag wasn't set before */ | ||
128 | if (flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS) | ||
129 | lpfn = adev->mc.real_vram_size >> PAGE_SHIFT; | ||
124 | 130 | ||
125 | if (flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS && | 131 | if (flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS && |
126 | !(flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED) && | 132 | !(flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED) && |
127 | adev->mc.visible_vram_size < adev->mc.real_vram_size) { | 133 | adev->mc.visible_vram_size < adev->mc.real_vram_size) { |
128 | places[c].fpfn = visible_pfn; | 134 | places[c].fpfn = visible_pfn; |
129 | places[c].lpfn = 0; | 135 | places[c].lpfn = lpfn; |
130 | places[c].flags = TTM_PL_FLAG_WC | | 136 | places[c].flags = TTM_PL_FLAG_WC | |
131 | TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_VRAM | | 137 | TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_VRAM | |
132 | TTM_PL_FLAG_TOPDOWN; | 138 | TTM_PL_FLAG_TOPDOWN; |
@@ -134,7 +140,7 @@ static void amdgpu_ttm_placement_init(struct amdgpu_device *adev, | |||
134 | } | 140 | } |
135 | 141 | ||
136 | places[c].fpfn = 0; | 142 | places[c].fpfn = 0; |
137 | places[c].lpfn = 0; | 143 | places[c].lpfn = lpfn; |
138 | places[c].flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED | | 144 | places[c].flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED | |
139 | TTM_PL_FLAG_VRAM; | 145 | TTM_PL_FLAG_VRAM; |
140 | if (flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED) | 146 | if (flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED) |
@@ -205,8 +211,10 @@ static void amdgpu_ttm_placement_init(struct amdgpu_device *adev, | |||
205 | 211 | ||
206 | void amdgpu_ttm_placement_from_domain(struct amdgpu_bo *abo, u32 domain) | 212 | void amdgpu_ttm_placement_from_domain(struct amdgpu_bo *abo, u32 domain) |
207 | { | 213 | { |
208 | amdgpu_ttm_placement_init(abo->adev, &abo->placement, | 214 | struct amdgpu_device *adev = amdgpu_ttm_adev(abo->tbo.bdev); |
209 | abo->placements, domain, abo->flags); | 215 | |
216 | amdgpu_ttm_placement_init(adev, &abo->placement, abo->placements, | ||
217 | domain, abo->flags); | ||
210 | } | 218 | } |
211 | 219 | ||
212 | static void amdgpu_fill_placement_to_bo(struct amdgpu_bo *bo, | 220 | static void amdgpu_fill_placement_to_bo(struct amdgpu_bo *bo, |
@@ -245,7 +253,8 @@ int amdgpu_bo_create_kernel(struct amdgpu_device *adev, | |||
245 | int r; | 253 | int r; |
246 | 254 | ||
247 | r = amdgpu_bo_create(adev, size, align, true, domain, | 255 | r = amdgpu_bo_create(adev, size, align, true, domain, |
248 | AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED, | 256 | AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED | |
257 | AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS, | ||
249 | NULL, NULL, bo_ptr); | 258 | NULL, NULL, bo_ptr); |
250 | if (r) { | 259 | if (r) { |
251 | dev_err(adev->dev, "(%d) failed to allocate kernel bo\n", r); | 260 | dev_err(adev->dev, "(%d) failed to allocate kernel bo\n", r); |
@@ -351,7 +360,6 @@ int amdgpu_bo_create_restricted(struct amdgpu_device *adev, | |||
351 | kfree(bo); | 360 | kfree(bo); |
352 | return r; | 361 | return r; |
353 | } | 362 | } |
354 | bo->adev = adev; | ||
355 | INIT_LIST_HEAD(&bo->shadow_list); | 363 | INIT_LIST_HEAD(&bo->shadow_list); |
356 | INIT_LIST_HEAD(&bo->va); | 364 | INIT_LIST_HEAD(&bo->va); |
357 | bo->prefered_domains = domain & (AMDGPU_GEM_DOMAIN_VRAM | | 365 | bo->prefered_domains = domain & (AMDGPU_GEM_DOMAIN_VRAM | |
@@ -616,6 +624,7 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain, | |||
616 | u64 min_offset, u64 max_offset, | 624 | u64 min_offset, u64 max_offset, |
617 | u64 *gpu_addr) | 625 | u64 *gpu_addr) |
618 | { | 626 | { |
627 | struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev); | ||
619 | int r, i; | 628 | int r, i; |
620 | unsigned fpfn, lpfn; | 629 | unsigned fpfn, lpfn; |
621 | 630 | ||
@@ -643,18 +652,20 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain, | |||
643 | 652 | ||
644 | return 0; | 653 | return 0; |
645 | } | 654 | } |
655 | |||
656 | bo->flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS; | ||
646 | amdgpu_ttm_placement_from_domain(bo, domain); | 657 | amdgpu_ttm_placement_from_domain(bo, domain); |
647 | for (i = 0; i < bo->placement.num_placement; i++) { | 658 | for (i = 0; i < bo->placement.num_placement; i++) { |
648 | /* force to pin into visible video ram */ | 659 | /* force to pin into visible video ram */ |
649 | if ((bo->placements[i].flags & TTM_PL_FLAG_VRAM) && | 660 | if ((bo->placements[i].flags & TTM_PL_FLAG_VRAM) && |
650 | !(bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS) && | 661 | !(bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS) && |
651 | (!max_offset || max_offset > | 662 | (!max_offset || max_offset > |
652 | bo->adev->mc.visible_vram_size)) { | 663 | adev->mc.visible_vram_size)) { |
653 | if (WARN_ON_ONCE(min_offset > | 664 | if (WARN_ON_ONCE(min_offset > |
654 | bo->adev->mc.visible_vram_size)) | 665 | adev->mc.visible_vram_size)) |
655 | return -EINVAL; | 666 | return -EINVAL; |
656 | fpfn = min_offset >> PAGE_SHIFT; | 667 | fpfn = min_offset >> PAGE_SHIFT; |
657 | lpfn = bo->adev->mc.visible_vram_size >> PAGE_SHIFT; | 668 | lpfn = adev->mc.visible_vram_size >> PAGE_SHIFT; |
658 | } else { | 669 | } else { |
659 | fpfn = min_offset >> PAGE_SHIFT; | 670 | fpfn = min_offset >> PAGE_SHIFT; |
660 | lpfn = max_offset >> PAGE_SHIFT; | 671 | lpfn = max_offset >> PAGE_SHIFT; |
@@ -669,12 +680,12 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain, | |||
669 | 680 | ||
670 | r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false); | 681 | r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false); |
671 | if (unlikely(r)) { | 682 | if (unlikely(r)) { |
672 | dev_err(bo->adev->dev, "%p pin failed\n", bo); | 683 | dev_err(adev->dev, "%p pin failed\n", bo); |
673 | goto error; | 684 | goto error; |
674 | } | 685 | } |
675 | r = amdgpu_ttm_bind(&bo->tbo, &bo->tbo.mem); | 686 | r = amdgpu_ttm_bind(&bo->tbo, &bo->tbo.mem); |
676 | if (unlikely(r)) { | 687 | if (unlikely(r)) { |
677 | dev_err(bo->adev->dev, "%p bind failed\n", bo); | 688 | dev_err(adev->dev, "%p bind failed\n", bo); |
678 | goto error; | 689 | goto error; |
679 | } | 690 | } |
680 | 691 | ||
@@ -682,11 +693,11 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain, | |||
682 | if (gpu_addr != NULL) | 693 | if (gpu_addr != NULL) |
683 | *gpu_addr = amdgpu_bo_gpu_offset(bo); | 694 | *gpu_addr = amdgpu_bo_gpu_offset(bo); |
684 | if (domain == AMDGPU_GEM_DOMAIN_VRAM) { | 695 | if (domain == AMDGPU_GEM_DOMAIN_VRAM) { |
685 | bo->adev->vram_pin_size += amdgpu_bo_size(bo); | 696 | adev->vram_pin_size += amdgpu_bo_size(bo); |
686 | if (bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS) | 697 | if (bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS) |
687 | bo->adev->invisible_pin_size += amdgpu_bo_size(bo); | 698 | adev->invisible_pin_size += amdgpu_bo_size(bo); |
688 | } else if (domain == AMDGPU_GEM_DOMAIN_GTT) { | 699 | } else if (domain == AMDGPU_GEM_DOMAIN_GTT) { |
689 | bo->adev->gart_pin_size += amdgpu_bo_size(bo); | 700 | adev->gart_pin_size += amdgpu_bo_size(bo); |
690 | } | 701 | } |
691 | 702 | ||
692 | error: | 703 | error: |
@@ -700,10 +711,11 @@ int amdgpu_bo_pin(struct amdgpu_bo *bo, u32 domain, u64 *gpu_addr) | |||
700 | 711 | ||
701 | int amdgpu_bo_unpin(struct amdgpu_bo *bo) | 712 | int amdgpu_bo_unpin(struct amdgpu_bo *bo) |
702 | { | 713 | { |
714 | struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev); | ||
703 | int r, i; | 715 | int r, i; |
704 | 716 | ||
705 | if (!bo->pin_count) { | 717 | if (!bo->pin_count) { |
706 | dev_warn(bo->adev->dev, "%p unpin not necessary\n", bo); | 718 | dev_warn(adev->dev, "%p unpin not necessary\n", bo); |
707 | return 0; | 719 | return 0; |
708 | } | 720 | } |
709 | bo->pin_count--; | 721 | bo->pin_count--; |
@@ -715,16 +727,16 @@ int amdgpu_bo_unpin(struct amdgpu_bo *bo) | |||
715 | } | 727 | } |
716 | r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false); | 728 | r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false); |
717 | if (unlikely(r)) { | 729 | if (unlikely(r)) { |
718 | dev_err(bo->adev->dev, "%p validate failed for unpin\n", bo); | 730 | dev_err(adev->dev, "%p validate failed for unpin\n", bo); |
719 | goto error; | 731 | goto error; |
720 | } | 732 | } |
721 | 733 | ||
722 | if (bo->tbo.mem.mem_type == TTM_PL_VRAM) { | 734 | if (bo->tbo.mem.mem_type == TTM_PL_VRAM) { |
723 | bo->adev->vram_pin_size -= amdgpu_bo_size(bo); | 735 | adev->vram_pin_size -= amdgpu_bo_size(bo); |
724 | if (bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS) | 736 | if (bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS) |
725 | bo->adev->invisible_pin_size -= amdgpu_bo_size(bo); | 737 | adev->invisible_pin_size -= amdgpu_bo_size(bo); |
726 | } else if (bo->tbo.mem.mem_type == TTM_PL_TT) { | 738 | } else if (bo->tbo.mem.mem_type == TTM_PL_TT) { |
727 | bo->adev->gart_pin_size -= amdgpu_bo_size(bo); | 739 | adev->gart_pin_size -= amdgpu_bo_size(bo); |
728 | } | 740 | } |
729 | 741 | ||
730 | error: | 742 | error: |
@@ -849,6 +861,7 @@ int amdgpu_bo_get_metadata(struct amdgpu_bo *bo, void *buffer, | |||
849 | void amdgpu_bo_move_notify(struct ttm_buffer_object *bo, | 861 | void amdgpu_bo_move_notify(struct ttm_buffer_object *bo, |
850 | struct ttm_mem_reg *new_mem) | 862 | struct ttm_mem_reg *new_mem) |
851 | { | 863 | { |
864 | struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev); | ||
852 | struct amdgpu_bo *abo; | 865 | struct amdgpu_bo *abo; |
853 | struct ttm_mem_reg *old_mem = &bo->mem; | 866 | struct ttm_mem_reg *old_mem = &bo->mem; |
854 | 867 | ||
@@ -856,21 +869,21 @@ void amdgpu_bo_move_notify(struct ttm_buffer_object *bo, | |||
856 | return; | 869 | return; |
857 | 870 | ||
858 | abo = container_of(bo, struct amdgpu_bo, tbo); | 871 | abo = container_of(bo, struct amdgpu_bo, tbo); |
859 | amdgpu_vm_bo_invalidate(abo->adev, abo); | 872 | amdgpu_vm_bo_invalidate(adev, abo); |
860 | 873 | ||
861 | /* update statistics */ | 874 | /* update statistics */ |
862 | if (!new_mem) | 875 | if (!new_mem) |
863 | return; | 876 | return; |
864 | 877 | ||
865 | /* move_notify is called before move happens */ | 878 | /* move_notify is called before move happens */ |
866 | amdgpu_update_memory_usage(abo->adev, &bo->mem, new_mem); | 879 | amdgpu_update_memory_usage(adev, &bo->mem, new_mem); |
867 | 880 | ||
868 | trace_amdgpu_ttm_bo_move(abo, new_mem->mem_type, old_mem->mem_type); | 881 | trace_amdgpu_ttm_bo_move(abo, new_mem->mem_type, old_mem->mem_type); |
869 | } | 882 | } |
870 | 883 | ||
871 | int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo) | 884 | int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo) |
872 | { | 885 | { |
873 | struct amdgpu_device *adev; | 886 | struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev); |
874 | struct amdgpu_bo *abo; | 887 | struct amdgpu_bo *abo; |
875 | unsigned long offset, size, lpfn; | 888 | unsigned long offset, size, lpfn; |
876 | int i, r; | 889 | int i, r; |
@@ -879,13 +892,14 @@ int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo) | |||
879 | return 0; | 892 | return 0; |
880 | 893 | ||
881 | abo = container_of(bo, struct amdgpu_bo, tbo); | 894 | abo = container_of(bo, struct amdgpu_bo, tbo); |
882 | adev = abo->adev; | ||
883 | if (bo->mem.mem_type != TTM_PL_VRAM) | 895 | if (bo->mem.mem_type != TTM_PL_VRAM) |
884 | return 0; | 896 | return 0; |
885 | 897 | ||
886 | size = bo->mem.num_pages << PAGE_SHIFT; | 898 | size = bo->mem.num_pages << PAGE_SHIFT; |
887 | offset = bo->mem.start << PAGE_SHIFT; | 899 | offset = bo->mem.start << PAGE_SHIFT; |
888 | if ((offset + size) <= adev->mc.visible_vram_size) | 900 | /* TODO: figure out how to map scattered VRAM to the CPU */ |
901 | if ((offset + size) <= adev->mc.visible_vram_size && | ||
902 | (abo->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS)) | ||
889 | return 0; | 903 | return 0; |
890 | 904 | ||
891 | /* Can't move a pinned BO to visible VRAM */ | 905 | /* Can't move a pinned BO to visible VRAM */ |
@@ -893,6 +907,7 @@ int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo) | |||
893 | return -EINVAL; | 907 | return -EINVAL; |
894 | 908 | ||
895 | /* hurrah the memory is not visible ! */ | 909 | /* hurrah the memory is not visible ! */ |
910 | abo->flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS; | ||
896 | amdgpu_ttm_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_VRAM); | 911 | amdgpu_ttm_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_VRAM); |
897 | lpfn = adev->mc.visible_vram_size >> PAGE_SHIFT; | 912 | lpfn = adev->mc.visible_vram_size >> PAGE_SHIFT; |
898 | for (i = 0; i < abo->placement.num_placement; i++) { | 913 | for (i = 0; i < abo->placement.num_placement; i++) { |
@@ -954,6 +969,8 @@ u64 amdgpu_bo_gpu_offset(struct amdgpu_bo *bo) | |||
954 | WARN_ON_ONCE(!ww_mutex_is_locked(&bo->tbo.resv->lock) && | 969 | WARN_ON_ONCE(!ww_mutex_is_locked(&bo->tbo.resv->lock) && |
955 | !bo->pin_count); | 970 | !bo->pin_count); |
956 | WARN_ON_ONCE(bo->tbo.mem.start == AMDGPU_BO_INVALID_OFFSET); | 971 | WARN_ON_ONCE(bo->tbo.mem.start == AMDGPU_BO_INVALID_OFFSET); |
972 | WARN_ON_ONCE(bo->tbo.mem.mem_type == TTM_PL_VRAM && | ||
973 | !(bo->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS)); | ||
957 | 974 | ||
958 | return bo->tbo.offset; | 975 | return bo->tbo.offset; |
959 | } | 976 | } |
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h index 8255034d73eb..d3baf834ac24 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h | |||
@@ -71,12 +71,13 @@ static inline unsigned amdgpu_mem_type_to_domain(u32 mem_type) | |||
71 | */ | 71 | */ |
72 | static inline int amdgpu_bo_reserve(struct amdgpu_bo *bo, bool no_intr) | 72 | static inline int amdgpu_bo_reserve(struct amdgpu_bo *bo, bool no_intr) |
73 | { | 73 | { |
74 | struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev); | ||
74 | int r; | 75 | int r; |
75 | 76 | ||
76 | r = ttm_bo_reserve(&bo->tbo, !no_intr, false, NULL); | 77 | r = ttm_bo_reserve(&bo->tbo, !no_intr, false, NULL); |
77 | if (unlikely(r != 0)) { | 78 | if (unlikely(r != 0)) { |
78 | if (r != -ERESTARTSYS) | 79 | if (r != -ERESTARTSYS) |
79 | dev_err(bo->adev->dev, "%p reserve failed\n", bo); | 80 | dev_err(adev->dev, "%p reserve failed\n", bo); |
80 | return r; | 81 | return r; |
81 | } | 82 | } |
82 | return 0; | 83 | return 0; |
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c index accc908bdc88..274f3309aec9 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c | |||
@@ -986,10 +986,10 @@ restart_search: | |||
986 | 986 | ||
987 | static void amdgpu_dpm_change_power_state_locked(struct amdgpu_device *adev) | 987 | static void amdgpu_dpm_change_power_state_locked(struct amdgpu_device *adev) |
988 | { | 988 | { |
989 | int i; | ||
990 | struct amdgpu_ps *ps; | 989 | struct amdgpu_ps *ps; |
991 | enum amd_pm_state_type dpm_state; | 990 | enum amd_pm_state_type dpm_state; |
992 | int ret; | 991 | int ret; |
992 | bool equal; | ||
993 | 993 | ||
994 | /* if dpm init failed */ | 994 | /* if dpm init failed */ |
995 | if (!adev->pm.dpm_enabled) | 995 | if (!adev->pm.dpm_enabled) |
@@ -1009,46 +1009,6 @@ static void amdgpu_dpm_change_power_state_locked(struct amdgpu_device *adev) | |||
1009 | else | 1009 | else |
1010 | return; | 1010 | return; |
1011 | 1011 | ||
1012 | /* no need to reprogram if nothing changed unless we are on BTC+ */ | ||
1013 | if (adev->pm.dpm.current_ps == adev->pm.dpm.requested_ps) { | ||
1014 | /* vce just modifies an existing state so force a change */ | ||
1015 | if (ps->vce_active != adev->pm.dpm.vce_active) | ||
1016 | goto force; | ||
1017 | if (adev->flags & AMD_IS_APU) { | ||
1018 | /* for APUs if the num crtcs changed but state is the same, | ||
1019 | * all we need to do is update the display configuration. | ||
1020 | */ | ||
1021 | if (adev->pm.dpm.new_active_crtcs != adev->pm.dpm.current_active_crtcs) { | ||
1022 | /* update display watermarks based on new power state */ | ||
1023 | amdgpu_display_bandwidth_update(adev); | ||
1024 | /* update displays */ | ||
1025 | amdgpu_dpm_display_configuration_changed(adev); | ||
1026 | adev->pm.dpm.current_active_crtcs = adev->pm.dpm.new_active_crtcs; | ||
1027 | adev->pm.dpm.current_active_crtc_count = adev->pm.dpm.new_active_crtc_count; | ||
1028 | } | ||
1029 | return; | ||
1030 | } else { | ||
1031 | /* for BTC+ if the num crtcs hasn't changed and state is the same, | ||
1032 | * nothing to do, if the num crtcs is > 1 and state is the same, | ||
1033 | * update display configuration. | ||
1034 | */ | ||
1035 | if (adev->pm.dpm.new_active_crtcs == | ||
1036 | adev->pm.dpm.current_active_crtcs) { | ||
1037 | return; | ||
1038 | } else if ((adev->pm.dpm.current_active_crtc_count > 1) && | ||
1039 | (adev->pm.dpm.new_active_crtc_count > 1)) { | ||
1040 | /* update display watermarks based on new power state */ | ||
1041 | amdgpu_display_bandwidth_update(adev); | ||
1042 | /* update displays */ | ||
1043 | amdgpu_dpm_display_configuration_changed(adev); | ||
1044 | adev->pm.dpm.current_active_crtcs = adev->pm.dpm.new_active_crtcs; | ||
1045 | adev->pm.dpm.current_active_crtc_count = adev->pm.dpm.new_active_crtc_count; | ||
1046 | return; | ||
1047 | } | ||
1048 | } | ||
1049 | } | ||
1050 | |||
1051 | force: | ||
1052 | if (amdgpu_dpm == 1) { | 1012 | if (amdgpu_dpm == 1) { |
1053 | printk("switching from power state:\n"); | 1013 | printk("switching from power state:\n"); |
1054 | amdgpu_dpm_print_power_state(adev, adev->pm.dpm.current_ps); | 1014 | amdgpu_dpm_print_power_state(adev, adev->pm.dpm.current_ps); |
@@ -1059,31 +1019,21 @@ force: | |||
1059 | /* update whether vce is active */ | 1019 | /* update whether vce is active */ |
1060 | ps->vce_active = adev->pm.dpm.vce_active; | 1020 | ps->vce_active = adev->pm.dpm.vce_active; |
1061 | 1021 | ||
1022 | amdgpu_dpm_display_configuration_changed(adev); | ||
1023 | |||
1062 | ret = amdgpu_dpm_pre_set_power_state(adev); | 1024 | ret = amdgpu_dpm_pre_set_power_state(adev); |
1063 | if (ret) | 1025 | if (ret) |
1064 | return; | 1026 | return; |
1065 | 1027 | ||
1066 | /* update display watermarks based on new power state */ | 1028 | if ((0 != amgdpu_dpm_check_state_equal(adev, adev->pm.dpm.current_ps, adev->pm.dpm.requested_ps, &equal))) |
1067 | amdgpu_display_bandwidth_update(adev); | 1029 | equal = false; |
1068 | 1030 | ||
1069 | /* wait for the rings to drain */ | 1031 | if (equal) |
1070 | for (i = 0; i < AMDGPU_MAX_RINGS; i++) { | 1032 | return; |
1071 | struct amdgpu_ring *ring = adev->rings[i]; | ||
1072 | if (ring && ring->ready) | ||
1073 | amdgpu_fence_wait_empty(ring); | ||
1074 | } | ||
1075 | 1033 | ||
1076 | /* program the new power state */ | ||
1077 | amdgpu_dpm_set_power_state(adev); | 1034 | amdgpu_dpm_set_power_state(adev); |
1078 | |||
1079 | /* update current power state */ | ||
1080 | adev->pm.dpm.current_ps = adev->pm.dpm.requested_ps; | ||
1081 | |||
1082 | amdgpu_dpm_post_set_power_state(adev); | 1035 | amdgpu_dpm_post_set_power_state(adev); |
1083 | 1036 | ||
1084 | /* update displays */ | ||
1085 | amdgpu_dpm_display_configuration_changed(adev); | ||
1086 | |||
1087 | adev->pm.dpm.current_active_crtcs = adev->pm.dpm.new_active_crtcs; | 1037 | adev->pm.dpm.current_active_crtcs = adev->pm.dpm.new_active_crtcs; |
1088 | adev->pm.dpm.current_active_crtc_count = adev->pm.dpm.new_active_crtc_count; | 1038 | adev->pm.dpm.current_active_crtc_count = adev->pm.dpm.new_active_crtc_count; |
1089 | 1039 | ||
@@ -1135,7 +1085,7 @@ void amdgpu_dpm_enable_vce(struct amdgpu_device *adev, bool enable) | |||
1135 | mutex_lock(&adev->pm.mutex); | 1085 | mutex_lock(&adev->pm.mutex); |
1136 | adev->pm.dpm.vce_active = true; | 1086 | adev->pm.dpm.vce_active = true; |
1137 | /* XXX select vce level based on ring/task */ | 1087 | /* XXX select vce level based on ring/task */ |
1138 | adev->pm.dpm.vce_level = AMDGPU_VCE_LEVEL_AC_ALL; | 1088 | adev->pm.dpm.vce_level = AMD_VCE_LEVEL_AC_ALL; |
1139 | mutex_unlock(&adev->pm.mutex); | 1089 | mutex_unlock(&adev->pm.mutex); |
1140 | } else { | 1090 | } else { |
1141 | mutex_lock(&adev->pm.mutex); | 1091 | mutex_lock(&adev->pm.mutex); |
@@ -1276,20 +1226,20 @@ void amdgpu_pm_compute_clocks(struct amdgpu_device *adev) | |||
1276 | struct drm_device *ddev = adev->ddev; | 1226 | struct drm_device *ddev = adev->ddev; |
1277 | struct drm_crtc *crtc; | 1227 | struct drm_crtc *crtc; |
1278 | struct amdgpu_crtc *amdgpu_crtc; | 1228 | struct amdgpu_crtc *amdgpu_crtc; |
1229 | int i = 0; | ||
1279 | 1230 | ||
1280 | if (!adev->pm.dpm_enabled) | 1231 | if (!adev->pm.dpm_enabled) |
1281 | return; | 1232 | return; |
1282 | 1233 | ||
1283 | if (adev->pp_enabled) { | 1234 | amdgpu_display_bandwidth_update(adev); |
1284 | int i = 0; | ||
1285 | 1235 | ||
1286 | amdgpu_display_bandwidth_update(adev); | 1236 | for (i = 0; i < AMDGPU_MAX_RINGS; i++) { |
1287 | for (i = 0; i < AMDGPU_MAX_RINGS; i++) { | 1237 | struct amdgpu_ring *ring = adev->rings[i]; |
1288 | struct amdgpu_ring *ring = adev->rings[i]; | 1238 | if (ring && ring->ready) |
1289 | if (ring && ring->ready) | 1239 | amdgpu_fence_wait_empty(ring); |
1290 | amdgpu_fence_wait_empty(ring); | 1240 | } |
1291 | } | ||
1292 | 1241 | ||
1242 | if (adev->pp_enabled) { | ||
1293 | amdgpu_dpm_dispatch_task(adev, AMD_PP_EVENT_DISPLAY_CONFIG_CHANGE, NULL, NULL); | 1243 | amdgpu_dpm_dispatch_task(adev, AMD_PP_EVENT_DISPLAY_CONFIG_CHANGE, NULL, NULL); |
1294 | } else { | 1244 | } else { |
1295 | mutex_lock(&adev->pm.mutex); | 1245 | mutex_lock(&adev->pm.mutex); |
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c index 7532ff822aa7..fa6baf31a35d 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c | |||
@@ -299,7 +299,7 @@ static int amdgpu_pp_soft_reset(void *handle) | |||
299 | return ret; | 299 | return ret; |
300 | } | 300 | } |
301 | 301 | ||
302 | const struct amd_ip_funcs amdgpu_pp_ip_funcs = { | 302 | static const struct amd_ip_funcs amdgpu_pp_ip_funcs = { |
303 | .name = "amdgpu_powerplay", | 303 | .name = "amdgpu_powerplay", |
304 | .early_init = amdgpu_pp_early_init, | 304 | .early_init = amdgpu_pp_early_init, |
305 | .late_init = amdgpu_pp_late_init, | 305 | .late_init = amdgpu_pp_late_init, |
@@ -316,3 +316,12 @@ const struct amd_ip_funcs amdgpu_pp_ip_funcs = { | |||
316 | .set_clockgating_state = amdgpu_pp_set_clockgating_state, | 316 | .set_clockgating_state = amdgpu_pp_set_clockgating_state, |
317 | .set_powergating_state = amdgpu_pp_set_powergating_state, | 317 | .set_powergating_state = amdgpu_pp_set_powergating_state, |
318 | }; | 318 | }; |
319 | |||
320 | const struct amdgpu_ip_block_version amdgpu_pp_ip_block = | ||
321 | { | ||
322 | .type = AMD_IP_BLOCK_TYPE_SMC, | ||
323 | .major = 1, | ||
324 | .minor = 0, | ||
325 | .rev = 0, | ||
326 | .funcs = &amdgpu_pp_ip_funcs, | ||
327 | }; | ||
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.h index da5cf47cfd99..c0c4bfdcdb14 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.h | |||
@@ -23,11 +23,11 @@ | |||
23 | * | 23 | * |
24 | */ | 24 | */ |
25 | 25 | ||
26 | #ifndef __AMDGPU_POPWERPLAY_H__ | 26 | #ifndef __AMDGPU_POWERPLAY_H__ |
27 | #define __AMDGPU_POPWERPLAY_H__ | 27 | #define __AMDGPU_POWERPLAY_H__ |
28 | 28 | ||
29 | #include "amd_shared.h" | 29 | #include "amd_shared.h" |
30 | 30 | ||
31 | extern const struct amd_ip_funcs amdgpu_pp_ip_funcs; | 31 | extern const struct amdgpu_ip_block_version amdgpu_pp_ip_block; |
32 | 32 | ||
33 | #endif /* __AMDSOC_DM_H__ */ | 33 | #endif /* __AMDGPU_POWERPLAY_H__ */ |
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c index 3cb5e903cd62..4c992826d2d6 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c | |||
@@ -65,7 +65,7 @@ int amdgpu_ring_alloc(struct amdgpu_ring *ring, unsigned ndw) | |||
65 | { | 65 | { |
66 | /* Align requested size with padding so unlock_commit can | 66 | /* Align requested size with padding so unlock_commit can |
67 | * pad safely */ | 67 | * pad safely */ |
68 | ndw = (ndw + ring->align_mask) & ~ring->align_mask; | 68 | ndw = (ndw + ring->funcs->align_mask) & ~ring->funcs->align_mask; |
69 | 69 | ||
70 | /* Make sure we aren't trying to allocate more space | 70 | /* Make sure we aren't trying to allocate more space |
71 | * than the maximum for one submission | 71 | * than the maximum for one submission |
@@ -94,7 +94,7 @@ void amdgpu_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count) | |||
94 | int i; | 94 | int i; |
95 | 95 | ||
96 | for (i = 0; i < count; i++) | 96 | for (i = 0; i < count; i++) |
97 | amdgpu_ring_write(ring, ring->nop); | 97 | amdgpu_ring_write(ring, ring->funcs->nop); |
98 | } | 98 | } |
99 | 99 | ||
100 | /** amdgpu_ring_generic_pad_ib - pad IB with NOP packets | 100 | /** amdgpu_ring_generic_pad_ib - pad IB with NOP packets |
@@ -106,8 +106,8 @@ void amdgpu_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count) | |||
106 | */ | 106 | */ |
107 | void amdgpu_ring_generic_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib) | 107 | void amdgpu_ring_generic_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib) |
108 | { | 108 | { |
109 | while (ib->length_dw & ring->align_mask) | 109 | while (ib->length_dw & ring->funcs->align_mask) |
110 | ib->ptr[ib->length_dw++] = ring->nop; | 110 | ib->ptr[ib->length_dw++] = ring->funcs->nop; |
111 | } | 111 | } |
112 | 112 | ||
113 | /** | 113 | /** |
@@ -125,8 +125,9 @@ void amdgpu_ring_commit(struct amdgpu_ring *ring) | |||
125 | uint32_t count; | 125 | uint32_t count; |
126 | 126 | ||
127 | /* We pad to match fetch size */ | 127 | /* We pad to match fetch size */ |
128 | count = ring->align_mask + 1 - (ring->wptr & ring->align_mask); | 128 | count = ring->funcs->align_mask + 1 - |
129 | count %= ring->align_mask + 1; | 129 | (ring->wptr & ring->funcs->align_mask); |
130 | count %= ring->funcs->align_mask + 1; | ||
130 | ring->funcs->insert_nop(ring, count); | 131 | ring->funcs->insert_nop(ring, count); |
131 | 132 | ||
132 | mb(); | 133 | mb(); |
@@ -163,9 +164,8 @@ void amdgpu_ring_undo(struct amdgpu_ring *ring) | |||
163 | * Returns 0 on success, error on failure. | 164 | * Returns 0 on success, error on failure. |
164 | */ | 165 | */ |
165 | int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring, | 166 | int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring, |
166 | unsigned max_dw, u32 nop, u32 align_mask, | 167 | unsigned max_dw, struct amdgpu_irq_src *irq_src, |
167 | struct amdgpu_irq_src *irq_src, unsigned irq_type, | 168 | unsigned irq_type) |
168 | enum amdgpu_ring_type ring_type) | ||
169 | { | 169 | { |
170 | int r; | 170 | int r; |
171 | 171 | ||
@@ -216,9 +216,6 @@ int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring, | |||
216 | 216 | ||
217 | ring->ring_size = roundup_pow_of_two(max_dw * 4 * | 217 | ring->ring_size = roundup_pow_of_two(max_dw * 4 * |
218 | amdgpu_sched_hw_submission); | 218 | amdgpu_sched_hw_submission); |
219 | ring->align_mask = align_mask; | ||
220 | ring->nop = nop; | ||
221 | ring->type = ring_type; | ||
222 | 219 | ||
223 | /* Allocate ring buffer */ | 220 | /* Allocate ring buffer */ |
224 | if (ring->ring_obj == NULL) { | 221 | if (ring->ring_obj == NULL) { |
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h new file mode 100644 index 000000000000..1ee1b65d7eff --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h | |||
@@ -0,0 +1,185 @@ | |||
1 | /* | ||
2 | * Copyright 2016 Advanced Micro Devices, Inc. | ||
3 | * | ||
4 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
5 | * copy of this software and associated documentation files (the "Software"), | ||
6 | * to deal in the Software without restriction, including without limitation | ||
7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
8 | * and/or sell copies of the Software, and to permit persons to whom the | ||
9 | * Software is furnished to do so, subject to the following conditions: | ||
10 | * | ||
11 | * The above copyright notice and this permission notice shall be included in | ||
12 | * all copies or substantial portions of the Software. | ||
13 | * | ||
14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
17 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR | ||
18 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | ||
19 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | ||
20 | * OTHER DEALINGS IN THE SOFTWARE. | ||
21 | * | ||
22 | * Authors: Christian König | ||
23 | */ | ||
24 | #ifndef __AMDGPU_RING_H__ | ||
25 | #define __AMDGPU_RING_H__ | ||
26 | |||
27 | #include "gpu_scheduler.h" | ||
28 | |||
29 | /* max number of rings */ | ||
30 | #define AMDGPU_MAX_RINGS 16 | ||
31 | #define AMDGPU_MAX_GFX_RINGS 1 | ||
32 | #define AMDGPU_MAX_COMPUTE_RINGS 8 | ||
33 | #define AMDGPU_MAX_VCE_RINGS 3 | ||
34 | |||
35 | /* some special values for the owner field */ | ||
36 | #define AMDGPU_FENCE_OWNER_UNDEFINED ((void*)0ul) | ||
37 | #define AMDGPU_FENCE_OWNER_VM ((void*)1ul) | ||
38 | |||
39 | #define AMDGPU_FENCE_FLAG_64BIT (1 << 0) | ||
40 | #define AMDGPU_FENCE_FLAG_INT (1 << 1) | ||
41 | |||
42 | enum amdgpu_ring_type { | ||
43 | AMDGPU_RING_TYPE_GFX, | ||
44 | AMDGPU_RING_TYPE_COMPUTE, | ||
45 | AMDGPU_RING_TYPE_SDMA, | ||
46 | AMDGPU_RING_TYPE_UVD, | ||
47 | AMDGPU_RING_TYPE_VCE | ||
48 | }; | ||
49 | |||
50 | struct amdgpu_device; | ||
51 | struct amdgpu_ring; | ||
52 | struct amdgpu_ib; | ||
53 | struct amdgpu_cs_parser; | ||
54 | |||
55 | /* | ||
56 | * Fences. | ||
57 | */ | ||
58 | struct amdgpu_fence_driver { | ||
59 | uint64_t gpu_addr; | ||
60 | volatile uint32_t *cpu_addr; | ||
61 | /* sync_seq is protected by ring emission lock */ | ||
62 | uint32_t sync_seq; | ||
63 | atomic_t last_seq; | ||
64 | bool initialized; | ||
65 | struct amdgpu_irq_src *irq_src; | ||
66 | unsigned irq_type; | ||
67 | struct timer_list fallback_timer; | ||
68 | unsigned num_fences_mask; | ||
69 | spinlock_t lock; | ||
70 | struct fence **fences; | ||
71 | }; | ||
72 | |||
73 | int amdgpu_fence_driver_init(struct amdgpu_device *adev); | ||
74 | void amdgpu_fence_driver_fini(struct amdgpu_device *adev); | ||
75 | void amdgpu_fence_driver_force_completion(struct amdgpu_device *adev); | ||
76 | |||
77 | int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring, | ||
78 | unsigned num_hw_submission); | ||
79 | int amdgpu_fence_driver_start_ring(struct amdgpu_ring *ring, | ||
80 | struct amdgpu_irq_src *irq_src, | ||
81 | unsigned irq_type); | ||
82 | void amdgpu_fence_driver_suspend(struct amdgpu_device *adev); | ||
83 | void amdgpu_fence_driver_resume(struct amdgpu_device *adev); | ||
84 | int amdgpu_fence_emit(struct amdgpu_ring *ring, struct fence **fence); | ||
85 | void amdgpu_fence_process(struct amdgpu_ring *ring); | ||
86 | int amdgpu_fence_wait_empty(struct amdgpu_ring *ring); | ||
87 | unsigned amdgpu_fence_count_emitted(struct amdgpu_ring *ring); | ||
88 | |||
89 | /* | ||
90 | * Rings. | ||
91 | */ | ||
92 | |||
93 | /* provided by hw blocks that expose a ring buffer for commands */ | ||
94 | struct amdgpu_ring_funcs { | ||
95 | enum amdgpu_ring_type type; | ||
96 | uint32_t align_mask; | ||
97 | u32 nop; | ||
98 | |||
99 | /* ring read/write ptr handling */ | ||
100 | u32 (*get_rptr)(struct amdgpu_ring *ring); | ||
101 | u32 (*get_wptr)(struct amdgpu_ring *ring); | ||
102 | void (*set_wptr)(struct amdgpu_ring *ring); | ||
103 | /* validating and patching of IBs */ | ||
104 | int (*parse_cs)(struct amdgpu_cs_parser *p, uint32_t ib_idx); | ||
105 | /* constants to calculate how many DW are needed for an emit */ | ||
106 | unsigned emit_frame_size; | ||
107 | unsigned emit_ib_size; | ||
108 | /* command emit functions */ | ||
109 | void (*emit_ib)(struct amdgpu_ring *ring, | ||
110 | struct amdgpu_ib *ib, | ||
111 | unsigned vm_id, bool ctx_switch); | ||
112 | void (*emit_fence)(struct amdgpu_ring *ring, uint64_t addr, | ||
113 | uint64_t seq, unsigned flags); | ||
114 | void (*emit_pipeline_sync)(struct amdgpu_ring *ring); | ||
115 | void (*emit_vm_flush)(struct amdgpu_ring *ring, unsigned vm_id, | ||
116 | uint64_t pd_addr); | ||
117 | void (*emit_hdp_flush)(struct amdgpu_ring *ring); | ||
118 | void (*emit_hdp_invalidate)(struct amdgpu_ring *ring); | ||
119 | void (*emit_gds_switch)(struct amdgpu_ring *ring, uint32_t vmid, | ||
120 | uint32_t gds_base, uint32_t gds_size, | ||
121 | uint32_t gws_base, uint32_t gws_size, | ||
122 | uint32_t oa_base, uint32_t oa_size); | ||
123 | /* testing functions */ | ||
124 | int (*test_ring)(struct amdgpu_ring *ring); | ||
125 | int (*test_ib)(struct amdgpu_ring *ring, long timeout); | ||
126 | /* insert NOP packets */ | ||
127 | void (*insert_nop)(struct amdgpu_ring *ring, uint32_t count); | ||
128 | /* pad the indirect buffer to the necessary number of dw */ | ||
129 | void (*pad_ib)(struct amdgpu_ring *ring, struct amdgpu_ib *ib); | ||
130 | unsigned (*init_cond_exec)(struct amdgpu_ring *ring); | ||
131 | void (*patch_cond_exec)(struct amdgpu_ring *ring, unsigned offset); | ||
132 | /* note usage for clock and power gating */ | ||
133 | void (*begin_use)(struct amdgpu_ring *ring); | ||
134 | void (*end_use)(struct amdgpu_ring *ring); | ||
135 | void (*emit_switch_buffer) (struct amdgpu_ring *ring); | ||
136 | void (*emit_cntxcntl) (struct amdgpu_ring *ring, uint32_t flags); | ||
137 | }; | ||
138 | |||
139 | struct amdgpu_ring { | ||
140 | struct amdgpu_device *adev; | ||
141 | const struct amdgpu_ring_funcs *funcs; | ||
142 | struct amdgpu_fence_driver fence_drv; | ||
143 | struct amd_gpu_scheduler sched; | ||
144 | |||
145 | struct amdgpu_bo *ring_obj; | ||
146 | volatile uint32_t *ring; | ||
147 | unsigned rptr_offs; | ||
148 | unsigned wptr; | ||
149 | unsigned wptr_old; | ||
150 | unsigned ring_size; | ||
151 | unsigned max_dw; | ||
152 | int count_dw; | ||
153 | uint64_t gpu_addr; | ||
154 | uint32_t ptr_mask; | ||
155 | bool ready; | ||
156 | u32 idx; | ||
157 | u32 me; | ||
158 | u32 pipe; | ||
159 | u32 queue; | ||
160 | struct amdgpu_bo *mqd_obj; | ||
161 | u32 doorbell_index; | ||
162 | bool use_doorbell; | ||
163 | unsigned wptr_offs; | ||
164 | unsigned fence_offs; | ||
165 | uint64_t current_ctx; | ||
166 | char name[16]; | ||
167 | unsigned cond_exe_offs; | ||
168 | u64 cond_exe_gpu_addr; | ||
169 | volatile u32 *cond_exe_cpu_addr; | ||
170 | #if defined(CONFIG_DEBUG_FS) | ||
171 | struct dentry *ent; | ||
172 | #endif | ||
173 | }; | ||
174 | |||
175 | int amdgpu_ring_alloc(struct amdgpu_ring *ring, unsigned ndw); | ||
176 | void amdgpu_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count); | ||
177 | void amdgpu_ring_generic_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib); | ||
178 | void amdgpu_ring_commit(struct amdgpu_ring *ring); | ||
179 | void amdgpu_ring_undo(struct amdgpu_ring *ring); | ||
180 | int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring, | ||
181 | unsigned ring_size, struct amdgpu_irq_src *irq_src, | ||
182 | unsigned irq_type); | ||
183 | void amdgpu_ring_fini(struct amdgpu_ring *ring); | ||
184 | |||
185 | #endif | ||
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.h new file mode 100644 index 000000000000..405f379ac186 --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.h | |||
@@ -0,0 +1,56 @@ | |||
1 | /* | ||
2 | * Copyright 2016 Advanced Micro Devices, Inc. | ||
3 | * | ||
4 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
5 | * copy of this software and associated documentation files (the "Software"), | ||
6 | * to deal in the Software without restriction, including without limitation | ||
7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
8 | * and/or sell copies of the Software, and to permit persons to whom the | ||
9 | * Software is furnished to do so, subject to the following conditions: | ||
10 | * | ||
11 | * The above copyright notice and this permission notice shall be included in | ||
12 | * all copies or substantial portions of the Software. | ||
13 | * | ||
14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
17 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR | ||
18 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | ||
19 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | ||
20 | * OTHER DEALINGS IN THE SOFTWARE. | ||
21 | * | ||
22 | * Authors: Christian König | ||
23 | */ | ||
24 | #ifndef __AMDGPU_SYNC_H__ | ||
25 | #define __AMDGPU_SYNC_H__ | ||
26 | |||
27 | #include <linux/hashtable.h> | ||
28 | |||
29 | struct fence; | ||
30 | struct reservation_object; | ||
31 | struct amdgpu_device; | ||
32 | struct amdgpu_ring; | ||
33 | |||
34 | /* | ||
35 | * Container for fences used to sync command submissions. | ||
36 | */ | ||
37 | struct amdgpu_sync { | ||
38 | DECLARE_HASHTABLE(fences, 4); | ||
39 | struct fence *last_vm_update; | ||
40 | }; | ||
41 | |||
42 | void amdgpu_sync_create(struct amdgpu_sync *sync); | ||
43 | int amdgpu_sync_fence(struct amdgpu_device *adev, struct amdgpu_sync *sync, | ||
44 | struct fence *f); | ||
45 | int amdgpu_sync_resv(struct amdgpu_device *adev, | ||
46 | struct amdgpu_sync *sync, | ||
47 | struct reservation_object *resv, | ||
48 | void *owner); | ||
49 | struct fence *amdgpu_sync_peek_fence(struct amdgpu_sync *sync, | ||
50 | struct amdgpu_ring *ring); | ||
51 | struct fence *amdgpu_sync_get_fence(struct amdgpu_sync *sync); | ||
52 | void amdgpu_sync_free(struct amdgpu_sync *sync); | ||
53 | int amdgpu_sync_init(void); | ||
54 | void amdgpu_sync_fini(void); | ||
55 | |||
56 | #endif | ||
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c index dcaf691f56b5..f1a206df9823 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | |||
@@ -51,16 +51,6 @@ | |||
51 | static int amdgpu_ttm_debugfs_init(struct amdgpu_device *adev); | 51 | static int amdgpu_ttm_debugfs_init(struct amdgpu_device *adev); |
52 | static void amdgpu_ttm_debugfs_fini(struct amdgpu_device *adev); | 52 | static void amdgpu_ttm_debugfs_fini(struct amdgpu_device *adev); |
53 | 53 | ||
54 | static struct amdgpu_device *amdgpu_get_adev(struct ttm_bo_device *bdev) | ||
55 | { | ||
56 | struct amdgpu_mman *mman; | ||
57 | struct amdgpu_device *adev; | ||
58 | |||
59 | mman = container_of(bdev, struct amdgpu_mman, bdev); | ||
60 | adev = container_of(mman, struct amdgpu_device, mman); | ||
61 | return adev; | ||
62 | } | ||
63 | |||
64 | 54 | ||
65 | /* | 55 | /* |
66 | * Global memory. | 56 | * Global memory. |
@@ -150,7 +140,7 @@ static int amdgpu_init_mem_type(struct ttm_bo_device *bdev, uint32_t type, | |||
150 | { | 140 | { |
151 | struct amdgpu_device *adev; | 141 | struct amdgpu_device *adev; |
152 | 142 | ||
153 | adev = amdgpu_get_adev(bdev); | 143 | adev = amdgpu_ttm_adev(bdev); |
154 | 144 | ||
155 | switch (type) { | 145 | switch (type) { |
156 | case TTM_PL_SYSTEM: | 146 | case TTM_PL_SYSTEM: |
@@ -168,7 +158,7 @@ static int amdgpu_init_mem_type(struct ttm_bo_device *bdev, uint32_t type, | |||
168 | break; | 158 | break; |
169 | case TTM_PL_VRAM: | 159 | case TTM_PL_VRAM: |
170 | /* "On-card" video ram */ | 160 | /* "On-card" video ram */ |
171 | man->func = &ttm_bo_manager_func; | 161 | man->func = &amdgpu_vram_mgr_func; |
172 | man->gpu_offset = adev->mc.vram_start; | 162 | man->gpu_offset = adev->mc.vram_start; |
173 | man->flags = TTM_MEMTYPE_FLAG_FIXED | | 163 | man->flags = TTM_MEMTYPE_FLAG_FIXED | |
174 | TTM_MEMTYPE_FLAG_MAPPABLE; | 164 | TTM_MEMTYPE_FLAG_MAPPABLE; |
@@ -195,6 +185,7 @@ static int amdgpu_init_mem_type(struct ttm_bo_device *bdev, uint32_t type, | |||
195 | static void amdgpu_evict_flags(struct ttm_buffer_object *bo, | 185 | static void amdgpu_evict_flags(struct ttm_buffer_object *bo, |
196 | struct ttm_placement *placement) | 186 | struct ttm_placement *placement) |
197 | { | 187 | { |
188 | struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev); | ||
198 | struct amdgpu_bo *abo; | 189 | struct amdgpu_bo *abo; |
199 | static struct ttm_place placements = { | 190 | static struct ttm_place placements = { |
200 | .fpfn = 0, | 191 | .fpfn = 0, |
@@ -213,7 +204,7 @@ static void amdgpu_evict_flags(struct ttm_buffer_object *bo, | |||
213 | abo = container_of(bo, struct amdgpu_bo, tbo); | 204 | abo = container_of(bo, struct amdgpu_bo, tbo); |
214 | switch (bo->mem.mem_type) { | 205 | switch (bo->mem.mem_type) { |
215 | case TTM_PL_VRAM: | 206 | case TTM_PL_VRAM: |
216 | if (abo->adev->mman.buffer_funcs_ring->ready == false) { | 207 | if (adev->mman.buffer_funcs_ring->ready == false) { |
217 | amdgpu_ttm_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_CPU); | 208 | amdgpu_ttm_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_CPU); |
218 | } else { | 209 | } else { |
219 | amdgpu_ttm_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_GTT); | 210 | amdgpu_ttm_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_GTT); |
@@ -229,7 +220,7 @@ static void amdgpu_evict_flags(struct ttm_buffer_object *bo, | |||
229 | * allocating address space for the BO. | 220 | * allocating address space for the BO. |
230 | */ | 221 | */ |
231 | abo->placements[i].lpfn = | 222 | abo->placements[i].lpfn = |
232 | abo->adev->mc.gtt_size >> PAGE_SHIFT; | 223 | adev->mc.gtt_size >> PAGE_SHIFT; |
233 | } | 224 | } |
234 | } | 225 | } |
235 | break; | 226 | break; |
@@ -260,64 +251,116 @@ static void amdgpu_move_null(struct ttm_buffer_object *bo, | |||
260 | new_mem->mm_node = NULL; | 251 | new_mem->mm_node = NULL; |
261 | } | 252 | } |
262 | 253 | ||
263 | static int amdgpu_move_blit(struct ttm_buffer_object *bo, | 254 | static int amdgpu_mm_node_addr(struct ttm_buffer_object *bo, |
264 | bool evict, bool no_wait_gpu, | 255 | struct drm_mm_node *mm_node, |
265 | struct ttm_mem_reg *new_mem, | 256 | struct ttm_mem_reg *mem, |
266 | struct ttm_mem_reg *old_mem) | 257 | uint64_t *addr) |
267 | { | 258 | { |
268 | struct amdgpu_device *adev; | ||
269 | struct amdgpu_ring *ring; | ||
270 | uint64_t old_start, new_start; | ||
271 | struct fence *fence; | ||
272 | int r; | 259 | int r; |
273 | 260 | ||
274 | adev = amdgpu_get_adev(bo->bdev); | 261 | switch (mem->mem_type) { |
275 | ring = adev->mman.buffer_funcs_ring; | ||
276 | |||
277 | switch (old_mem->mem_type) { | ||
278 | case TTM_PL_TT: | 262 | case TTM_PL_TT: |
279 | r = amdgpu_ttm_bind(bo, old_mem); | 263 | r = amdgpu_ttm_bind(bo, mem); |
280 | if (r) | 264 | if (r) |
281 | return r; | 265 | return r; |
282 | 266 | ||
283 | case TTM_PL_VRAM: | 267 | case TTM_PL_VRAM: |
284 | old_start = (u64)old_mem->start << PAGE_SHIFT; | 268 | *addr = mm_node->start << PAGE_SHIFT; |
285 | old_start += bo->bdev->man[old_mem->mem_type].gpu_offset; | 269 | *addr += bo->bdev->man[mem->mem_type].gpu_offset; |
286 | break; | 270 | break; |
287 | default: | 271 | default: |
288 | DRM_ERROR("Unknown placement %d\n", old_mem->mem_type); | 272 | DRM_ERROR("Unknown placement %d\n", mem->mem_type); |
289 | return -EINVAL; | 273 | return -EINVAL; |
290 | } | 274 | } |
291 | switch (new_mem->mem_type) { | ||
292 | case TTM_PL_TT: | ||
293 | r = amdgpu_ttm_bind(bo, new_mem); | ||
294 | if (r) | ||
295 | return r; | ||
296 | 275 | ||
297 | case TTM_PL_VRAM: | 276 | return 0; |
298 | new_start = (u64)new_mem->start << PAGE_SHIFT; | 277 | } |
299 | new_start += bo->bdev->man[new_mem->mem_type].gpu_offset; | 278 | |
300 | break; | 279 | static int amdgpu_move_blit(struct ttm_buffer_object *bo, |
301 | default: | 280 | bool evict, bool no_wait_gpu, |
302 | DRM_ERROR("Unknown placement %d\n", old_mem->mem_type); | 281 | struct ttm_mem_reg *new_mem, |
303 | return -EINVAL; | 282 | struct ttm_mem_reg *old_mem) |
304 | } | 283 | { |
284 | struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev); | ||
285 | struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring; | ||
286 | |||
287 | struct drm_mm_node *old_mm, *new_mm; | ||
288 | uint64_t old_start, old_size, new_start, new_size; | ||
289 | unsigned long num_pages; | ||
290 | struct fence *fence = NULL; | ||
291 | int r; | ||
292 | |||
293 | BUILD_BUG_ON((PAGE_SIZE % AMDGPU_GPU_PAGE_SIZE) != 0); | ||
294 | |||
305 | if (!ring->ready) { | 295 | if (!ring->ready) { |
306 | DRM_ERROR("Trying to move memory with ring turned off.\n"); | 296 | DRM_ERROR("Trying to move memory with ring turned off.\n"); |
307 | return -EINVAL; | 297 | return -EINVAL; |
308 | } | 298 | } |
309 | 299 | ||
310 | BUILD_BUG_ON((PAGE_SIZE % AMDGPU_GPU_PAGE_SIZE) != 0); | 300 | old_mm = old_mem->mm_node; |
301 | r = amdgpu_mm_node_addr(bo, old_mm, old_mem, &old_start); | ||
302 | if (r) | ||
303 | return r; | ||
304 | old_size = old_mm->size; | ||
305 | |||
311 | 306 | ||
312 | r = amdgpu_copy_buffer(ring, old_start, new_start, | 307 | new_mm = new_mem->mm_node; |
313 | new_mem->num_pages * PAGE_SIZE, /* bytes */ | 308 | r = amdgpu_mm_node_addr(bo, new_mm, new_mem, &new_start); |
314 | bo->resv, &fence, false); | ||
315 | if (r) | 309 | if (r) |
316 | return r; | 310 | return r; |
311 | new_size = new_mm->size; | ||
312 | |||
313 | num_pages = new_mem->num_pages; | ||
314 | while (num_pages) { | ||
315 | unsigned long cur_pages = min(old_size, new_size); | ||
316 | struct fence *next; | ||
317 | |||
318 | r = amdgpu_copy_buffer(ring, old_start, new_start, | ||
319 | cur_pages * PAGE_SIZE, | ||
320 | bo->resv, &next, false); | ||
321 | if (r) | ||
322 | goto error; | ||
323 | |||
324 | fence_put(fence); | ||
325 | fence = next; | ||
326 | |||
327 | num_pages -= cur_pages; | ||
328 | if (!num_pages) | ||
329 | break; | ||
330 | |||
331 | old_size -= cur_pages; | ||
332 | if (!old_size) { | ||
333 | r = amdgpu_mm_node_addr(bo, ++old_mm, old_mem, | ||
334 | &old_start); | ||
335 | if (r) | ||
336 | goto error; | ||
337 | old_size = old_mm->size; | ||
338 | } else { | ||
339 | old_start += cur_pages * PAGE_SIZE; | ||
340 | } | ||
341 | |||
342 | new_size -= cur_pages; | ||
343 | if (!new_size) { | ||
344 | r = amdgpu_mm_node_addr(bo, ++new_mm, new_mem, | ||
345 | &new_start); | ||
346 | if (r) | ||
347 | goto error; | ||
348 | |||
349 | new_size = new_mm->size; | ||
350 | } else { | ||
351 | new_start += cur_pages * PAGE_SIZE; | ||
352 | } | ||
353 | } | ||
317 | 354 | ||
318 | r = ttm_bo_pipeline_move(bo, fence, evict, new_mem); | 355 | r = ttm_bo_pipeline_move(bo, fence, evict, new_mem); |
319 | fence_put(fence); | 356 | fence_put(fence); |
320 | return r; | 357 | return r; |
358 | |||
359 | error: | ||
360 | if (fence) | ||
361 | fence_wait(fence, false); | ||
362 | fence_put(fence); | ||
363 | return r; | ||
321 | } | 364 | } |
322 | 365 | ||
323 | static int amdgpu_move_vram_ram(struct ttm_buffer_object *bo, | 366 | static int amdgpu_move_vram_ram(struct ttm_buffer_object *bo, |
@@ -332,7 +375,7 @@ static int amdgpu_move_vram_ram(struct ttm_buffer_object *bo, | |||
332 | struct ttm_placement placement; | 375 | struct ttm_placement placement; |
333 | int r; | 376 | int r; |
334 | 377 | ||
335 | adev = amdgpu_get_adev(bo->bdev); | 378 | adev = amdgpu_ttm_adev(bo->bdev); |
336 | tmp_mem = *new_mem; | 379 | tmp_mem = *new_mem; |
337 | tmp_mem.mm_node = NULL; | 380 | tmp_mem.mm_node = NULL; |
338 | placement.num_placement = 1; | 381 | placement.num_placement = 1; |
@@ -379,7 +422,7 @@ static int amdgpu_move_ram_vram(struct ttm_buffer_object *bo, | |||
379 | struct ttm_place placements; | 422 | struct ttm_place placements; |
380 | int r; | 423 | int r; |
381 | 424 | ||
382 | adev = amdgpu_get_adev(bo->bdev); | 425 | adev = amdgpu_ttm_adev(bo->bdev); |
383 | tmp_mem = *new_mem; | 426 | tmp_mem = *new_mem; |
384 | tmp_mem.mm_node = NULL; | 427 | tmp_mem.mm_node = NULL; |
385 | placement.num_placement = 1; | 428 | placement.num_placement = 1; |
@@ -422,7 +465,7 @@ static int amdgpu_bo_move(struct ttm_buffer_object *bo, | |||
422 | if (WARN_ON_ONCE(abo->pin_count > 0)) | 465 | if (WARN_ON_ONCE(abo->pin_count > 0)) |
423 | return -EINVAL; | 466 | return -EINVAL; |
424 | 467 | ||
425 | adev = amdgpu_get_adev(bo->bdev); | 468 | adev = amdgpu_ttm_adev(bo->bdev); |
426 | 469 | ||
427 | /* remember the eviction */ | 470 | /* remember the eviction */ |
428 | if (evict) | 471 | if (evict) |
@@ -475,7 +518,7 @@ memcpy: | |||
475 | static int amdgpu_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem) | 518 | static int amdgpu_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem) |
476 | { | 519 | { |
477 | struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type]; | 520 | struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type]; |
478 | struct amdgpu_device *adev = amdgpu_get_adev(bdev); | 521 | struct amdgpu_device *adev = amdgpu_ttm_adev(bdev); |
479 | 522 | ||
480 | mem->bus.addr = NULL; | 523 | mem->bus.addr = NULL; |
481 | mem->bus.offset = 0; | 524 | mem->bus.offset = 0; |
@@ -607,7 +650,7 @@ release_pages: | |||
607 | /* prepare the sg table with the user pages */ | 650 | /* prepare the sg table with the user pages */ |
608 | static int amdgpu_ttm_tt_pin_userptr(struct ttm_tt *ttm) | 651 | static int amdgpu_ttm_tt_pin_userptr(struct ttm_tt *ttm) |
609 | { | 652 | { |
610 | struct amdgpu_device *adev = amdgpu_get_adev(ttm->bdev); | 653 | struct amdgpu_device *adev = amdgpu_ttm_adev(ttm->bdev); |
611 | struct amdgpu_ttm_tt *gtt = (void *)ttm; | 654 | struct amdgpu_ttm_tt *gtt = (void *)ttm; |
612 | unsigned nents; | 655 | unsigned nents; |
613 | int r; | 656 | int r; |
@@ -639,7 +682,7 @@ release_sg: | |||
639 | 682 | ||
640 | static void amdgpu_ttm_tt_unpin_userptr(struct ttm_tt *ttm) | 683 | static void amdgpu_ttm_tt_unpin_userptr(struct ttm_tt *ttm) |
641 | { | 684 | { |
642 | struct amdgpu_device *adev = amdgpu_get_adev(ttm->bdev); | 685 | struct amdgpu_device *adev = amdgpu_ttm_adev(ttm->bdev); |
643 | struct amdgpu_ttm_tt *gtt = (void *)ttm; | 686 | struct amdgpu_ttm_tt *gtt = (void *)ttm; |
644 | struct sg_page_iter sg_iter; | 687 | struct sg_page_iter sg_iter; |
645 | 688 | ||
@@ -799,7 +842,7 @@ static struct ttm_tt *amdgpu_ttm_tt_create(struct ttm_bo_device *bdev, | |||
799 | struct amdgpu_device *adev; | 842 | struct amdgpu_device *adev; |
800 | struct amdgpu_ttm_tt *gtt; | 843 | struct amdgpu_ttm_tt *gtt; |
801 | 844 | ||
802 | adev = amdgpu_get_adev(bdev); | 845 | adev = amdgpu_ttm_adev(bdev); |
803 | 846 | ||
804 | gtt = kzalloc(sizeof(struct amdgpu_ttm_tt), GFP_KERNEL); | 847 | gtt = kzalloc(sizeof(struct amdgpu_ttm_tt), GFP_KERNEL); |
805 | if (gtt == NULL) { | 848 | if (gtt == NULL) { |
@@ -843,7 +886,7 @@ static int amdgpu_ttm_tt_populate(struct ttm_tt *ttm) | |||
843 | return 0; | 886 | return 0; |
844 | } | 887 | } |
845 | 888 | ||
846 | adev = amdgpu_get_adev(ttm->bdev); | 889 | adev = amdgpu_ttm_adev(ttm->bdev); |
847 | 890 | ||
848 | #ifdef CONFIG_SWIOTLB | 891 | #ifdef CONFIG_SWIOTLB |
849 | if (swiotlb_nr_tbl()) { | 892 | if (swiotlb_nr_tbl()) { |
@@ -889,7 +932,7 @@ static void amdgpu_ttm_tt_unpopulate(struct ttm_tt *ttm) | |||
889 | if (slave) | 932 | if (slave) |
890 | return; | 933 | return; |
891 | 934 | ||
892 | adev = amdgpu_get_adev(ttm->bdev); | 935 | adev = amdgpu_ttm_adev(ttm->bdev); |
893 | 936 | ||
894 | #ifdef CONFIG_SWIOTLB | 937 | #ifdef CONFIG_SWIOTLB |
895 | if (swiotlb_nr_tbl()) { | 938 | if (swiotlb_nr_tbl()) { |
@@ -1012,7 +1055,7 @@ uint32_t amdgpu_ttm_tt_pte_flags(struct amdgpu_device *adev, struct ttm_tt *ttm, | |||
1012 | 1055 | ||
1013 | static void amdgpu_ttm_lru_removal(struct ttm_buffer_object *tbo) | 1056 | static void amdgpu_ttm_lru_removal(struct ttm_buffer_object *tbo) |
1014 | { | 1057 | { |
1015 | struct amdgpu_device *adev = amdgpu_get_adev(tbo->bdev); | 1058 | struct amdgpu_device *adev = amdgpu_ttm_adev(tbo->bdev); |
1016 | unsigned i, j; | 1059 | unsigned i, j; |
1017 | 1060 | ||
1018 | for (i = 0; i < AMDGPU_TTM_LRU_SIZE; ++i) { | 1061 | for (i = 0; i < AMDGPU_TTM_LRU_SIZE; ++i) { |
@@ -1029,7 +1072,7 @@ static void amdgpu_ttm_lru_removal(struct ttm_buffer_object *tbo) | |||
1029 | 1072 | ||
1030 | static struct amdgpu_mman_lru *amdgpu_ttm_lru(struct ttm_buffer_object *tbo) | 1073 | static struct amdgpu_mman_lru *amdgpu_ttm_lru(struct ttm_buffer_object *tbo) |
1031 | { | 1074 | { |
1032 | struct amdgpu_device *adev = amdgpu_get_adev(tbo->bdev); | 1075 | struct amdgpu_device *adev = amdgpu_ttm_adev(tbo->bdev); |
1033 | unsigned log2_size = min(ilog2(tbo->num_pages), | 1076 | unsigned log2_size = min(ilog2(tbo->num_pages), |
1034 | AMDGPU_TTM_LRU_SIZE - 1); | 1077 | AMDGPU_TTM_LRU_SIZE - 1); |
1035 | 1078 | ||
@@ -1060,12 +1103,37 @@ static struct list_head *amdgpu_ttm_swap_lru_tail(struct ttm_buffer_object *tbo) | |||
1060 | return res; | 1103 | return res; |
1061 | } | 1104 | } |
1062 | 1105 | ||
1106 | static bool amdgpu_ttm_bo_eviction_valuable(struct ttm_buffer_object *bo, | ||
1107 | const struct ttm_place *place) | ||
1108 | { | ||
1109 | if (bo->mem.mem_type == TTM_PL_VRAM && | ||
1110 | bo->mem.start == AMDGPU_BO_INVALID_OFFSET) { | ||
1111 | unsigned long num_pages = bo->mem.num_pages; | ||
1112 | struct drm_mm_node *node = bo->mem.mm_node; | ||
1113 | |||
1114 | /* Check each drm MM node individually */ | ||
1115 | while (num_pages) { | ||
1116 | if (place->fpfn < (node->start + node->size) && | ||
1117 | !(place->lpfn && place->lpfn <= node->start)) | ||
1118 | return true; | ||
1119 | |||
1120 | num_pages -= node->size; | ||
1121 | ++node; | ||
1122 | } | ||
1123 | |||
1124 | return false; | ||
1125 | } | ||
1126 | |||
1127 | return ttm_bo_eviction_valuable(bo, place); | ||
1128 | } | ||
1129 | |||
1063 | static struct ttm_bo_driver amdgpu_bo_driver = { | 1130 | static struct ttm_bo_driver amdgpu_bo_driver = { |
1064 | .ttm_tt_create = &amdgpu_ttm_tt_create, | 1131 | .ttm_tt_create = &amdgpu_ttm_tt_create, |
1065 | .ttm_tt_populate = &amdgpu_ttm_tt_populate, | 1132 | .ttm_tt_populate = &amdgpu_ttm_tt_populate, |
1066 | .ttm_tt_unpopulate = &amdgpu_ttm_tt_unpopulate, | 1133 | .ttm_tt_unpopulate = &amdgpu_ttm_tt_unpopulate, |
1067 | .invalidate_caches = &amdgpu_invalidate_caches, | 1134 | .invalidate_caches = &amdgpu_invalidate_caches, |
1068 | .init_mem_type = &amdgpu_init_mem_type, | 1135 | .init_mem_type = &amdgpu_init_mem_type, |
1136 | .eviction_valuable = amdgpu_ttm_bo_eviction_valuable, | ||
1069 | .evict_flags = &amdgpu_evict_flags, | 1137 | .evict_flags = &amdgpu_evict_flags, |
1070 | .move = &amdgpu_bo_move, | 1138 | .move = &amdgpu_bo_move, |
1071 | .verify_access = &amdgpu_verify_access, | 1139 | .verify_access = &amdgpu_verify_access, |
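
The new eviction_valuable callback only special-cases split VRAM allocations, which are marked with mem.start == AMDGPU_BO_INVALID_OFFSET; everything else falls through to ttm_bo_eviction_valuable(). Per node the test is an ordinary interval-overlap check; as an illustrative standalone predicate (node_overlaps_place is not in the patch):

    /* A node [start, start + size) overlaps the window [fpfn, lpfn)
     * unless it ends at or below fpfn or begins at or above lpfn;
     * lpfn == 0 means "no upper bound". */
    static bool node_overlaps_place(const struct drm_mm_node *node,
                                    const struct ttm_place *place)
    {
            if (place->fpfn >= node->start + node->size)
                    return false;
            if (place->lpfn && place->lpfn <= node->start)
                    return false;
            return true;
    }

Eviction is only worthwhile when at least one backing node intersects the requested window, which is exactly the case in which the loop above returns true.
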
@@ -1119,7 +1187,8 @@ int amdgpu_ttm_init(struct amdgpu_device *adev) | |||
1119 | 1187 | ||
1120 | r = amdgpu_bo_create(adev, 256 * 1024, PAGE_SIZE, true, | 1188 | r = amdgpu_bo_create(adev, 256 * 1024, PAGE_SIZE, true, |
1121 | AMDGPU_GEM_DOMAIN_VRAM, | 1189 | AMDGPU_GEM_DOMAIN_VRAM, |
1122 | AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED, | 1190 | AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED | |
1191 | AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS, | ||
1123 | NULL, NULL, &adev->stollen_vga_memory); | 1192 | NULL, NULL, &adev->stollen_vga_memory); |
1124 | if (r) { | 1193 | if (r) { |
1125 | return r; | 1194 | return r; |
@@ -1317,7 +1386,7 @@ int amdgpu_fill_buffer(struct amdgpu_bo *bo, | |||
1317 | struct reservation_object *resv, | 1386 | struct reservation_object *resv, |
1318 | struct fence **fence) | 1387 | struct fence **fence) |
1319 | { | 1388 | { |
1320 | struct amdgpu_device *adev = bo->adev; | 1389 | struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev); |
1321 | struct amdgpu_job *job; | 1390 | struct amdgpu_job *job; |
1322 | struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring; | 1391 | struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring; |
1323 | 1392 | ||
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h index 9812c805326c..d1c00c04782f 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h | |||
@@ -66,6 +66,7 @@ struct amdgpu_mman { | |||
66 | }; | 66 | }; |
67 | 67 | ||
68 | extern const struct ttm_mem_type_manager_func amdgpu_gtt_mgr_func; | 68 | extern const struct ttm_mem_type_manager_func amdgpu_gtt_mgr_func; |
69 | extern const struct ttm_mem_type_manager_func amdgpu_vram_mgr_func; | ||
69 | 70 | ||
70 | int amdgpu_gtt_mgr_alloc(struct ttm_mem_type_manager *man, | 71 | int amdgpu_gtt_mgr_alloc(struct ttm_mem_type_manager *man, |
71 | struct ttm_buffer_object *tbo, | 72 | struct ttm_buffer_object *tbo, |
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c index cb3d252f3c78..0f0b38191fac 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c | |||
@@ -228,6 +228,9 @@ static int amdgpu_ucode_init_single_fw(struct amdgpu_firmware_info *ucode, | |||
228 | ucode->mc_addr = mc_addr; | 228 | ucode->mc_addr = mc_addr; |
229 | ucode->kaddr = kptr; | 229 | ucode->kaddr = kptr; |
230 | 230 | ||
231 | if (ucode->ucode_id == AMDGPU_UCODE_ID_STORAGE) | ||
232 | return 0; | ||
233 | |||
231 | header = (const struct common_firmware_header *)ucode->fw->data; | 234 | header = (const struct common_firmware_header *)ucode->fw->data; |
232 | memcpy(ucode->kaddr, (void *)((uint8_t *)ucode->fw->data + | 235 | memcpy(ucode->kaddr, (void *)((uint8_t *)ucode->fw->data + |
233 | le32_to_cpu(header->ucode_array_offset_bytes)), | 236 | le32_to_cpu(header->ucode_array_offset_bytes)), |
@@ -236,6 +239,31 @@ static int amdgpu_ucode_init_single_fw(struct amdgpu_firmware_info *ucode, | |||
236 | return 0; | 239 | return 0; |
237 | } | 240 | } |
238 | 241 | ||
242 | static int amdgpu_ucode_patch_jt(struct amdgpu_firmware_info *ucode, | ||
243 | uint64_t mc_addr, void *kptr) | ||
244 | { | ||
245 | const struct gfx_firmware_header_v1_0 *header = NULL; | ||
246 | const struct common_firmware_header *comm_hdr = NULL; | ||
247 | uint8_t *src_addr = NULL; | ||
248 | uint8_t *dst_addr = NULL; | ||
249 | |||
250 | if (!ucode->fw) | ||
251 | return 0; | ||
252 | |||
253 | comm_hdr = (const struct common_firmware_header *)ucode->fw->data; | ||
254 | header = (const struct gfx_firmware_header_v1_0 *)ucode->fw->data; | ||
255 | dst_addr = ucode->kaddr + | ||
256 | ALIGN(le32_to_cpu(comm_hdr->ucode_size_bytes), | ||
257 | PAGE_SIZE); | ||
258 | src_addr = (uint8_t *)ucode->fw->data + | ||
259 | le32_to_cpu(comm_hdr->ucode_array_offset_bytes) + | ||
260 | (le32_to_cpu(header->jt_offset) * 4); | ||
261 | memcpy(dst_addr, src_addr, le32_to_cpu(header->jt_size) * 4); | ||
262 | |||
263 | return 0; | ||
264 | } | ||
265 | |||
266 | |||
239 | int amdgpu_ucode_init_bo(struct amdgpu_device *adev) | 267 | int amdgpu_ucode_init_bo(struct amdgpu_device *adev) |
240 | { | 268 | { |
241 | struct amdgpu_bo **bo = &adev->firmware.fw_buf; | 269 | struct amdgpu_bo **bo = &adev->firmware.fw_buf; |
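
amdgpu_ucode_patch_jt() places the jump table on the first page boundary after the microcode image, and the caller advances fw_offset past that extra page. A worked layout with invented sizes, assuming a 4 KiB PAGE_SIZE:

    /* Say ucode_size_bytes = 0x41c0 and jt_size = 0x40 dwords:
     *
     *   kaddr + 0x0000   microcode, 0x41c0 bytes
     *   kaddr + 0x5000   jump table, 0x40 * 4 = 0x100 bytes
     *                    (ALIGN(0x41c0, 4096) = 0x5000)
     *
     * fw_offset then grows by an extra ALIGN(0x40 << 2, 4096) = 0x1000
     * so the next firmware image starts on its own page. */
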
@@ -247,7 +275,8 @@ int amdgpu_ucode_init_bo(struct amdgpu_device *adev) | |||
247 | const struct common_firmware_header *header = NULL; | 275 | const struct common_firmware_header *header = NULL; |
248 | 276 | ||
249 | err = amdgpu_bo_create(adev, adev->firmware.fw_size, PAGE_SIZE, true, | 277 | err = amdgpu_bo_create(adev, adev->firmware.fw_size, PAGE_SIZE, true, |
250 | AMDGPU_GEM_DOMAIN_GTT, 0, NULL, NULL, bo); | 278 | amdgpu_sriov_vf(adev) ? AMDGPU_GEM_DOMAIN_VRAM : AMDGPU_GEM_DOMAIN_GTT, |
279 | 0, NULL, NULL, bo); | ||
251 | if (err) { | 280 | if (err) { |
252 | dev_err(adev->dev, "(%d) Firmware buffer allocate failed\n", err); | 281 | dev_err(adev->dev, "(%d) Firmware buffer allocate failed\n", err); |
253 | goto failed; | 282 | goto failed; |
@@ -259,7 +288,8 @@ int amdgpu_ucode_init_bo(struct amdgpu_device *adev) | |||
259 | goto failed_reserve; | 288 | goto failed_reserve; |
260 | } | 289 | } |
261 | 290 | ||
262 | err = amdgpu_bo_pin(*bo, AMDGPU_GEM_DOMAIN_GTT, &fw_mc_addr); | 291 | err = amdgpu_bo_pin(*bo, amdgpu_sriov_vf(adev) ? AMDGPU_GEM_DOMAIN_VRAM : AMDGPU_GEM_DOMAIN_GTT, |
292 | &fw_mc_addr); | ||
263 | if (err) { | 293 | if (err) { |
264 | dev_err(adev->dev, "(%d) Firmware buffer pin failed\n", err); | 294 | dev_err(adev->dev, "(%d) Firmware buffer pin failed\n", err); |
265 | goto failed_pin; | 295 | goto failed_pin; |
@@ -279,6 +309,13 @@ int amdgpu_ucode_init_bo(struct amdgpu_device *adev) | |||
279 | header = (const struct common_firmware_header *)ucode->fw->data; | 309 | header = (const struct common_firmware_header *)ucode->fw->data; |
280 | amdgpu_ucode_init_single_fw(ucode, fw_mc_addr + fw_offset, | 310 | amdgpu_ucode_init_single_fw(ucode, fw_mc_addr + fw_offset, |
281 | fw_buf_ptr + fw_offset); | 311 | fw_buf_ptr + fw_offset); |
312 | if (i == AMDGPU_UCODE_ID_CP_MEC1) { | ||
313 | const struct gfx_firmware_header_v1_0 *cp_hdr; | ||
314 | cp_hdr = (const struct gfx_firmware_header_v1_0 *)ucode->fw->data; | ||
315 | amdgpu_ucode_patch_jt(ucode, fw_mc_addr + fw_offset, | ||
316 | fw_buf_ptr + fw_offset); | ||
317 | fw_offset += ALIGN(le32_to_cpu(cp_hdr->jt_size) << 2, PAGE_SIZE); | ||
318 | } | ||
282 | fw_offset += ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE); | 319 | fw_offset += ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE); |
283 | } | 320 | } |
284 | } | 321 | } |
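
Create and pin now derive the placement from the same SR-IOV check, so on a virtual function the firmware buffer lives in VRAM where the host can reach it. The twice-repeated ternary could be factored into a helper; a sketch with a hypothetical name:

    static u32 amdgpu_ucode_fw_domain(struct amdgpu_device *adev)
    {
            return amdgpu_sriov_vf(adev) ? AMDGPU_GEM_DOMAIN_VRAM :
                                           AMDGPU_GEM_DOMAIN_GTT;
    }
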
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h index e468be4e28fa..a8a4230729f9 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h | |||
@@ -130,6 +130,7 @@ enum AMDGPU_UCODE_ID { | |||
130 | AMDGPU_UCODE_ID_CP_MEC1, | 130 | AMDGPU_UCODE_ID_CP_MEC1, |
131 | AMDGPU_UCODE_ID_CP_MEC2, | 131 | AMDGPU_UCODE_ID_CP_MEC2, |
132 | AMDGPU_UCODE_ID_RLC_G, | 132 | AMDGPU_UCODE_ID_RLC_G, |
133 | AMDGPU_UCODE_ID_STORAGE, | ||
133 | AMDGPU_UCODE_ID_MAXIMUM, | 134 | AMDGPU_UCODE_ID_MAXIMUM, |
134 | }; | 135 | }; |
135 | 136 | ||
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c index e3281cacc586..1b54cc218b47 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c | |||
@@ -876,6 +876,9 @@ int amdgpu_uvd_ring_parse_cs(struct amdgpu_cs_parser *parser, uint32_t ib_idx) | |||
876 | struct amdgpu_ib *ib = &parser->job->ibs[ib_idx]; | 876 | struct amdgpu_ib *ib = &parser->job->ibs[ib_idx]; |
877 | int r; | 877 | int r; |
878 | 878 | ||
879 | parser->job->vm = NULL; | ||
880 | ib->gpu_addr = amdgpu_sa_bo_gpu_addr(ib->sa_bo); | ||
881 | |||
879 | if (ib->length_dw % 16) { | 882 | if (ib->length_dw % 16) { |
880 | DRM_ERROR("UVD IB length (%d) not 16 dwords aligned!\n", | 883 | DRM_ERROR("UVD IB length (%d) not 16 dwords aligned!\n", |
881 | ib->length_dw); | 884 | ib->length_dw); |
@@ -931,7 +934,7 @@ static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo, | |||
931 | if (r) | 934 | if (r) |
932 | return r; | 935 | return r; |
933 | 936 | ||
934 | if (!bo->adev->uvd.address_64_bit) { | 937 | if (!ring->adev->uvd.address_64_bit) { |
935 | amdgpu_ttm_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_VRAM); | 938 | amdgpu_ttm_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_VRAM); |
936 | amdgpu_uvd_force_into_uvd_segment(bo); | 939 | amdgpu_uvd_force_into_uvd_segment(bo); |
937 | } | 940 | } |
@@ -1002,7 +1005,8 @@ int amdgpu_uvd_get_create_msg(struct amdgpu_ring *ring, uint32_t handle, | |||
1002 | 1005 | ||
1003 | r = amdgpu_bo_create(adev, 1024, PAGE_SIZE, true, | 1006 | r = amdgpu_bo_create(adev, 1024, PAGE_SIZE, true, |
1004 | AMDGPU_GEM_DOMAIN_VRAM, | 1007 | AMDGPU_GEM_DOMAIN_VRAM, |
1005 | AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED, | 1008 | AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED | |
1009 | AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS, | ||
1006 | NULL, NULL, &bo); | 1010 | NULL, NULL, &bo); |
1007 | if (r) | 1011 | if (r) |
1008 | return r; | 1012 | return r; |
@@ -1051,7 +1055,8 @@ int amdgpu_uvd_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle, | |||
1051 | 1055 | ||
1052 | r = amdgpu_bo_create(adev, 1024, PAGE_SIZE, true, | 1056 | r = amdgpu_bo_create(adev, 1024, PAGE_SIZE, true, |
1053 | AMDGPU_GEM_DOMAIN_VRAM, | 1057 | AMDGPU_GEM_DOMAIN_VRAM, |
1054 | AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED, | 1058 | AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED | |
1059 | AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS, | ||
1055 | NULL, NULL, &bo); | 1060 | NULL, NULL, &bo); |
1056 | if (r) | 1061 | if (r) |
1057 | return r; | 1062 | return r; |
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c index 7fe8fd884f06..3d6f86cd028f 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c | |||
@@ -157,7 +157,8 @@ int amdgpu_vce_sw_init(struct amdgpu_device *adev, unsigned long size) | |||
157 | 157 | ||
158 | r = amdgpu_bo_create(adev, size, PAGE_SIZE, true, | 158 | r = amdgpu_bo_create(adev, size, PAGE_SIZE, true, |
159 | AMDGPU_GEM_DOMAIN_VRAM, | 159 | AMDGPU_GEM_DOMAIN_VRAM, |
160 | AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED, | 160 | AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED | |
161 | AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS, | ||
161 | NULL, NULL, &adev->vce.vcpu_bo); | 162 | NULL, NULL, &adev->vce.vcpu_bo); |
162 | if (r) { | 163 | if (r) { |
163 | dev_err(adev->dev, "(%d) failed to allocate VCE bo\n", r); | 164 | dev_err(adev->dev, "(%d) failed to allocate VCE bo\n", r); |
@@ -641,6 +642,9 @@ int amdgpu_vce_ring_parse_cs(struct amdgpu_cs_parser *p, uint32_t ib_idx) | |||
641 | uint32_t *size = &tmp; | 642 | uint32_t *size = &tmp; |
642 | int i, r, idx = 0; | 643 | int i, r, idx = 0; |
643 | 644 | ||
645 | p->job->vm = NULL; | ||
646 | ib->gpu_addr = amdgpu_sa_bo_gpu_addr(ib->sa_bo); | ||
647 | |||
644 | r = amdgpu_cs_sysvm_access_required(p); | 648 | r = amdgpu_cs_sysvm_access_required(p); |
645 | if (r) | 649 | if (r) |
646 | return r; | 650 | return r; |
@@ -788,6 +792,96 @@ out: | |||
788 | } | 792 | } |
789 | 793 | ||
790 | /** | 794 | /** |
795 | * amdgpu_vce_ring_parse_cs_vm - parse the command stream in VM mode | ||
796 | * | ||
797 | * @p: parser context | ||
798 | * @ib_idx: index of the indirect buffer to parse | ||
799 | */ | ||
800 | int amdgpu_vce_ring_parse_cs_vm(struct amdgpu_cs_parser *p, uint32_t ib_idx) | ||
801 | { | ||
802 | struct amdgpu_ib *ib = &p->job->ibs[ib_idx]; | ||
803 | int session_idx = -1; | ||
804 | uint32_t destroyed = 0; | ||
805 | uint32_t created = 0; | ||
806 | uint32_t allocated = 0; | ||
807 | uint32_t tmp, handle = 0; | ||
808 | int i, r = 0, idx = 0; | ||
809 | |||
810 | while (idx < ib->length_dw) { | ||
811 | uint32_t len = amdgpu_get_ib_value(p, ib_idx, idx); | ||
812 | uint32_t cmd = amdgpu_get_ib_value(p, ib_idx, idx + 1); | ||
813 | |||
814 | if ((len < 8) || (len & 3)) { | ||
815 | DRM_ERROR("invalid VCE command length (%d)!\n", len); | ||
816 | r = -EINVAL; | ||
817 | goto out; | ||
818 | } | ||
819 | |||
820 | switch (cmd) { | ||
821 | case 0x00000001: /* session */ | ||
822 | handle = amdgpu_get_ib_value(p, ib_idx, idx + 2); | ||
823 | session_idx = amdgpu_vce_validate_handle(p, handle, | ||
824 | &allocated); | ||
825 | if (session_idx < 0) { | ||
826 | r = session_idx; | ||
827 | goto out; | ||
828 | } | ||
829 | break; | ||
830 | |||
831 | case 0x01000001: /* create */ | ||
832 | created |= 1 << session_idx; | ||
833 | if (destroyed & (1 << session_idx)) { | ||
834 | destroyed &= ~(1 << session_idx); | ||
835 | allocated |= 1 << session_idx; | ||
836 | |||
837 | } else if (!(allocated & (1 << session_idx))) { | ||
838 | DRM_ERROR("Handle already in use!\n"); | ||
839 | r = -EINVAL; | ||
840 | goto out; | ||
841 | } | ||
842 | |||
843 | break; | ||
844 | |||
845 | case 0x02000001: /* destroy */ | ||
846 | destroyed |= 1 << session_idx; | ||
847 | break; | ||
848 | |||
849 | default: | ||
850 | break; | ||
851 | } | ||
852 | |||
853 | if (session_idx == -1) { | ||
854 | DRM_ERROR("no session command at start of IB\n"); | ||
855 | r = -EINVAL; | ||
856 | goto out; | ||
857 | } | ||
858 | |||
859 | idx += len / 4; | ||
860 | } | ||
861 | |||
862 | if (allocated & ~created) { | ||
863 | DRM_ERROR("New session without create command!\n"); | ||
864 | r = -ENOENT; | ||
865 | } | ||
866 | |||
867 | out: | ||
868 | if (!r) { | ||
869 | /* No error, free all destroyed handle slots */ | ||
870 | tmp = destroyed; | ||
871 | amdgpu_ib_free(p->adev, ib, NULL); | ||
872 | } else { | ||
873 | /* Error during parsing, free all allocated handle slots */ | ||
874 | tmp = allocated; | ||
875 | } | ||
876 | |||
877 | for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i) | ||
878 | if (tmp & (1 << i)) | ||
879 | atomic_set(&p->adev->vce.handles[i], 0); | ||
880 | |||
881 | return r; | ||
882 | } | ||
883 | |||
884 | /** | ||
791 | * amdgpu_vce_ring_emit_ib - execute indirect buffer | 885 | * amdgpu_vce_ring_emit_ib - execute indirect buffer |
792 | * | 886 | * |
793 | * @ring: engine to use | 887 | * @ring: engine to use |
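
The VM parser tracks sessions purely as bitmasks: created, destroyed and allocated each hold one bit per handle slot. The out: label frees the destroyed slots on success and rolls the speculatively allocated slots back on error; the same bookkeeping as a standalone sketch (vce_release_handles is illustrative):

    static void vce_release_handles(struct amdgpu_device *adev,
                                    uint32_t allocated, uint32_t destroyed,
                                    bool failed)
    {
            uint32_t tmp = failed ? allocated : destroyed;
            unsigned i;

            for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i)
                    if (tmp & (1 << i))
                            atomic_set(&adev->vce.handles[i], 0);
    }
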
@@ -823,18 +917,6 @@ void amdgpu_vce_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq, | |||
823 | amdgpu_ring_write(ring, VCE_CMD_END); | 917 | amdgpu_ring_write(ring, VCE_CMD_END); |
824 | } | 918 | } |
825 | 919 | ||
826 | unsigned amdgpu_vce_ring_get_emit_ib_size(struct amdgpu_ring *ring) | ||
827 | { | ||
828 | return | ||
829 | 4; /* amdgpu_vce_ring_emit_ib */ | ||
830 | } | ||
831 | |||
832 | unsigned amdgpu_vce_ring_get_dma_frame_size(struct amdgpu_ring *ring) | ||
833 | { | ||
834 | return | ||
835 | 6; /* amdgpu_vce_ring_emit_fence x1 no user fence */ | ||
836 | } | ||
837 | |||
838 | /** | 920 | /** |
839 | * amdgpu_vce_ring_test_ring - test if VCE ring is working | 921 | * amdgpu_vce_ring_test_ring - test if VCE ring is working |
840 | * | 922 | * |
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h index 12729d2852df..44d49b576513 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h | |||
@@ -34,6 +34,7 @@ int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle, | |||
34 | bool direct, struct fence **fence); | 34 | bool direct, struct fence **fence); |
35 | void amdgpu_vce_free_handles(struct amdgpu_device *adev, struct drm_file *filp); | 35 | void amdgpu_vce_free_handles(struct amdgpu_device *adev, struct drm_file *filp); |
36 | int amdgpu_vce_ring_parse_cs(struct amdgpu_cs_parser *p, uint32_t ib_idx); | 36 | int amdgpu_vce_ring_parse_cs(struct amdgpu_cs_parser *p, uint32_t ib_idx); |
37 | int amdgpu_vce_ring_parse_cs_vm(struct amdgpu_cs_parser *p, uint32_t ib_idx); | ||
37 | void amdgpu_vce_ring_emit_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib, | 38 | void amdgpu_vce_ring_emit_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib, |
38 | unsigned vm_id, bool ctx_switch); | 39 | unsigned vm_id, bool ctx_switch); |
39 | void amdgpu_vce_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq, | 40 | void amdgpu_vce_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq, |
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index 06f24322e7c3..ded57dd538e2 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | |||
@@ -116,38 +116,43 @@ void amdgpu_vm_get_pd_bo(struct amdgpu_vm *vm, | |||
116 | } | 116 | } |
117 | 117 | ||
118 | /** | 118 | /** |
119 | * amdgpu_vm_get_bos - add the vm BOs to a duplicates list | 119 | * amdgpu_vm_validate_pt_bos - validate the page table BOs |
120 | * | 120 | * |
121 | * @adev: amdgpu device pointer | 121 | * @adev: amdgpu device pointer |
122 | * @vm: vm providing the BOs | 122 | * @vm: vm providing the BOs |
123 | * @duplicates: head of duplicates list | 123 | * @validate: callback to do the validation |
124 | * @param: parameter for the validation callback | ||
124 | * | 125 | * |
125 | * Add the page directory to the BO duplicates list | 126 | * Validate the page table BOs on command submission if necessary. |
126 | * for command submission. | ||
127 | */ | 127 | */ |
128 | void amdgpu_vm_get_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm, | 128 | int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm, |
129 | struct list_head *duplicates) | 129 | int (*validate)(void *p, struct amdgpu_bo *bo), |
130 | void *param) | ||
130 | { | 131 | { |
131 | uint64_t num_evictions; | 132 | uint64_t num_evictions; |
132 | unsigned i; | 133 | unsigned i; |
134 | int r; | ||
133 | 135 | ||
134 | /* We only need to validate the page tables | 136 | /* We only need to validate the page tables |
135 | * if they aren't already valid. | 137 | * if they aren't already valid. |
136 | */ | 138 | */ |
137 | num_evictions = atomic64_read(&adev->num_evictions); | 139 | num_evictions = atomic64_read(&adev->num_evictions); |
138 | if (num_evictions == vm->last_eviction_counter) | 140 | if (num_evictions == vm->last_eviction_counter) |
139 | return; | 141 | return 0; |
140 | 142 | ||
141 | /* add the vm page table to the list */ | 143 | /* add the vm page table to the list */ |
142 | for (i = 0; i <= vm->max_pde_used; ++i) { | 144 | for (i = 0; i <= vm->max_pde_used; ++i) { |
143 | struct amdgpu_bo_list_entry *entry = &vm->page_tables[i].entry; | 145 | struct amdgpu_bo *bo = vm->page_tables[i].bo; |
144 | 146 | ||
145 | if (!entry->robj) | 147 | if (!bo) |
146 | continue; | 148 | continue; |
147 | 149 | ||
148 | list_add(&entry->tv.head, duplicates); | 150 | r = validate(param, bo); |
151 | if (r) | ||
152 | return r; | ||
149 | } | 153 | } |
150 | 154 | ||
155 | return 0; | ||
151 | } | 156 | } |
152 | 157 | ||
153 | /** | 158 | /** |
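
Instead of handing the page tables back on a duplicates list for the caller to reserve and validate, the VM code now drives a caller-supplied callback per page-table BO. Roughly how the command-submission path would consume the new interface (amdgpu_cs_validate and the call site are an assumed sketch, not part of this hunk; amdgpu_cs_bo_validate is taken to be the existing per-BO validation helper):

    /* Sketch: forward each page-table BO to the per-BO validation
     * helper; "param" carries the parser context. */
    static int amdgpu_cs_validate(void *param, struct amdgpu_bo *bo)
    {
            struct amdgpu_cs_parser *p = param;

            return amdgpu_cs_bo_validate(p, bo);
    }

    /* called while the page directory reservation is held: */
    r = amdgpu_vm_validate_pt_bos(p->adev, &fpriv->vm,
                                  amdgpu_cs_validate, p);
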
@@ -166,12 +171,12 @@ void amdgpu_vm_move_pt_bos_in_lru(struct amdgpu_device *adev, | |||
166 | 171 | ||
167 | spin_lock(&glob->lru_lock); | 172 | spin_lock(&glob->lru_lock); |
168 | for (i = 0; i <= vm->max_pde_used; ++i) { | 173 | for (i = 0; i <= vm->max_pde_used; ++i) { |
169 | struct amdgpu_bo_list_entry *entry = &vm->page_tables[i].entry; | 174 | struct amdgpu_bo *bo = vm->page_tables[i].bo; |
170 | 175 | ||
171 | if (!entry->robj) | 176 | if (!bo) |
172 | continue; | 177 | continue; |
173 | 178 | ||
174 | ttm_bo_move_to_lru_tail(&entry->robj->tbo); | 179 | ttm_bo_move_to_lru_tail(&bo->tbo); |
175 | } | 180 | } |
176 | spin_unlock(&glob->lru_lock); | 181 | spin_unlock(&glob->lru_lock); |
177 | } | 182 | } |
@@ -341,9 +346,9 @@ error: | |||
341 | static bool amdgpu_vm_ring_has_compute_vm_bug(struct amdgpu_ring *ring) | 346 | static bool amdgpu_vm_ring_has_compute_vm_bug(struct amdgpu_ring *ring) |
342 | { | 347 | { |
343 | struct amdgpu_device *adev = ring->adev; | 348 | struct amdgpu_device *adev = ring->adev; |
344 | const struct amdgpu_ip_block_version *ip_block; | 349 | const struct amdgpu_ip_block *ip_block; |
345 | 350 | ||
346 | if (ring->type != AMDGPU_RING_TYPE_COMPUTE) | 351 | if (ring->funcs->type != AMDGPU_RING_TYPE_COMPUTE) |
347 | /* only compute rings */ | 352 | /* only compute rings */ |
348 | return false; | 353 | return false; |
349 | 354 | ||
@@ -351,10 +356,10 @@ static bool amdgpu_vm_ring_has_compute_vm_bug(struct amdgpu_ring *ring) | |||
351 | if (!ip_block) | 356 | if (!ip_block) |
352 | return false; | 357 | return false; |
353 | 358 | ||
354 | if (ip_block->major <= 7) { | 359 | if (ip_block->version->major <= 7) { |
355 | /* gfx7 has no workaround */ | 360 | /* gfx7 has no workaround */ |
356 | return true; | 361 | return true; |
357 | } else if (ip_block->major == 8) { | 362 | } else if (ip_block->version->major == 8) { |
358 | if (adev->gfx.mec_fw_version >= 673) | 363 | if (adev->gfx.mec_fw_version >= 673) |
359 | /* gfx8 is fixed in MEC firmware 673 */ | 364 | /* gfx8 is fixed in MEC firmware 673 */ |
360 | return false; | 365 | return false; |
@@ -612,16 +617,26 @@ static uint64_t amdgpu_vm_map_gart(const dma_addr_t *pages_addr, uint64_t addr) | |||
612 | return result; | 617 | return result; |
613 | } | 618 | } |
614 | 619 | ||
615 | static int amdgpu_vm_update_pd_or_shadow(struct amdgpu_device *adev, | 620 | /* |
616 | struct amdgpu_vm *vm, | 621 | * amdgpu_vm_update_page_directory - make sure that page directory is valid |
617 | bool shadow) | 622 | * |
623 | * @adev: amdgpu_device pointer | ||
624 | * @vm: requested vm | ||
625 | * | ||
626 | * Walks the used directory entries and writes an updated PDE for every | ||
627 | * page table whose address changed, mirroring each write into the | ||
628 | * shadow page directory when one exists. | ||
629 | * | ||
630 | * Returns 0 for success, error for failure. | ||
631 | */ | ||
632 | int amdgpu_vm_update_page_directory(struct amdgpu_device *adev, | ||
633 | struct amdgpu_vm *vm) | ||
618 | { | 634 | { |
635 | struct amdgpu_bo *shadow; | ||
619 | struct amdgpu_ring *ring; | 636 | struct amdgpu_ring *ring; |
620 | struct amdgpu_bo *pd = shadow ? vm->page_directory->shadow : | 637 | uint64_t pd_addr, shadow_addr; |
621 | vm->page_directory; | ||
622 | uint64_t pd_addr; | ||
623 | uint32_t incr = AMDGPU_VM_PTE_COUNT * 8; | 638 | uint32_t incr = AMDGPU_VM_PTE_COUNT * 8; |
624 | uint64_t last_pde = ~0, last_pt = ~0; | 639 | uint64_t last_pde = ~0, last_pt = ~0, last_shadow = ~0; |
625 | unsigned count = 0, pt_idx, ndw; | 640 | unsigned count = 0, pt_idx, ndw; |
626 | struct amdgpu_job *job; | 641 | struct amdgpu_job *job; |
627 | struct amdgpu_pte_update_params params; | 642 | struct amdgpu_pte_update_params params; |
@@ -629,15 +644,8 @@ static int amdgpu_vm_update_pd_or_shadow(struct amdgpu_device *adev, | |||
629 | 644 | ||
630 | int r; | 645 | int r; |
631 | 646 | ||
632 | if (!pd) | ||
633 | return 0; | ||
634 | |||
635 | r = amdgpu_ttm_bind(&pd->tbo, &pd->tbo.mem); | ||
636 | if (r) | ||
637 | return r; | ||
638 | |||
639 | pd_addr = amdgpu_bo_gpu_offset(pd); | ||
640 | ring = container_of(vm->entity.sched, struct amdgpu_ring, sched); | 647 | ring = container_of(vm->entity.sched, struct amdgpu_ring, sched); |
648 | shadow = vm->page_directory->shadow; | ||
641 | 649 | ||
642 | /* padding, etc. */ | 650 | /* padding, etc. */ |
643 | ndw = 64; | 651 | ndw = 64; |
@@ -645,6 +653,17 @@ static int amdgpu_vm_update_pd_or_shadow(struct amdgpu_device *adev, | |||
645 | /* assume the worst case */ | 653 | /* assume the worst case */ |
646 | ndw += vm->max_pde_used * 6; | 654 | ndw += vm->max_pde_used * 6; |
647 | 655 | ||
656 | pd_addr = amdgpu_bo_gpu_offset(vm->page_directory); | ||
657 | if (shadow) { | ||
658 | r = amdgpu_ttm_bind(&shadow->tbo, &shadow->tbo.mem); | ||
659 | if (r) | ||
660 | return r; | ||
661 | shadow_addr = amdgpu_bo_gpu_offset(shadow); | ||
662 | ndw *= 2; | ||
663 | } else { | ||
664 | shadow_addr = 0; | ||
665 | } | ||
666 | |||
648 | r = amdgpu_job_alloc_with_ib(adev, ndw * 4, &job); | 667 | r = amdgpu_job_alloc_with_ib(adev, ndw * 4, &job); |
649 | if (r) | 668 | if (r) |
650 | return r; | 669 | return r; |
@@ -655,30 +674,26 @@ static int amdgpu_vm_update_pd_or_shadow(struct amdgpu_device *adev, | |||
655 | 674 | ||
656 | /* walk over the address space and update the page directory */ | 675 | /* walk over the address space and update the page directory */ |
657 | for (pt_idx = 0; pt_idx <= vm->max_pde_used; ++pt_idx) { | 676 | for (pt_idx = 0; pt_idx <= vm->max_pde_used; ++pt_idx) { |
658 | struct amdgpu_bo *bo = vm->page_tables[pt_idx].entry.robj; | 677 | struct amdgpu_bo *bo = vm->page_tables[pt_idx].bo; |
659 | uint64_t pde, pt; | 678 | uint64_t pde, pt; |
660 | 679 | ||
661 | if (bo == NULL) | 680 | if (bo == NULL) |
662 | continue; | 681 | continue; |
663 | 682 | ||
664 | if (bo->shadow) { | 683 | if (bo->shadow) { |
665 | struct amdgpu_bo *shadow = bo->shadow; | 684 | struct amdgpu_bo *pt_shadow = bo->shadow; |
666 | 685 | ||
667 | r = amdgpu_ttm_bind(&shadow->tbo, &shadow->tbo.mem); | 686 | r = amdgpu_ttm_bind(&pt_shadow->tbo, |
687 | &pt_shadow->tbo.mem); | ||
668 | if (r) | 688 | if (r) |
669 | return r; | 689 | return r; |
670 | } | 690 | } |
671 | 691 | ||
672 | pt = amdgpu_bo_gpu_offset(bo); | 692 | pt = amdgpu_bo_gpu_offset(bo); |
673 | if (!shadow) { | 693 | if (vm->page_tables[pt_idx].addr == pt) |
674 | if (vm->page_tables[pt_idx].addr == pt) | 694 | continue; |
675 | continue; | 695 | |
676 | vm->page_tables[pt_idx].addr = pt; | 696 | vm->page_tables[pt_idx].addr = pt; |
677 | } else { | ||
678 | if (vm->page_tables[pt_idx].shadow_addr == pt) | ||
679 | continue; | ||
680 | vm->page_tables[pt_idx].shadow_addr = pt; | ||
681 | } | ||
682 | 697 | ||
683 | pde = pd_addr + pt_idx * 8; | 698 | pde = pd_addr + pt_idx * 8; |
684 | if (((last_pde + 8 * count) != pde) || | 699 | if (((last_pde + 8 * count) != pde) || |
@@ -686,6 +701,13 @@ static int amdgpu_vm_update_pd_or_shadow(struct amdgpu_device *adev, | |||
686 | (count == AMDGPU_VM_MAX_UPDATE_SIZE)) { | 701 | (count == AMDGPU_VM_MAX_UPDATE_SIZE)) { |
687 | 702 | ||
688 | if (count) { | 703 | if (count) { |
704 | if (shadow) | ||
705 | amdgpu_vm_do_set_ptes(¶ms, | ||
706 | last_shadow, | ||
707 | last_pt, count, | ||
708 | incr, | ||
709 | AMDGPU_PTE_VALID); | ||
710 | |||
689 | amdgpu_vm_do_set_ptes(¶ms, last_pde, | 711 | amdgpu_vm_do_set_ptes(¶ms, last_pde, |
690 | last_pt, count, incr, | 712 | last_pt, count, incr, |
691 | AMDGPU_PTE_VALID); | 713 | AMDGPU_PTE_VALID); |
@@ -693,34 +715,44 @@ static int amdgpu_vm_update_pd_or_shadow(struct amdgpu_device *adev, | |||
693 | 715 | ||
694 | count = 1; | 716 | count = 1; |
695 | last_pde = pde; | 717 | last_pde = pde; |
718 | last_shadow = shadow_addr + pt_idx * 8; | ||
696 | last_pt = pt; | 719 | last_pt = pt; |
697 | } else { | 720 | } else { |
698 | ++count; | 721 | ++count; |
699 | } | 722 | } |
700 | } | 723 | } |
701 | 724 | ||
702 | if (count) | 725 | if (count) { |
726 | if (vm->page_directory->shadow) | ||
727 | amdgpu_vm_do_set_ptes(¶ms, last_shadow, last_pt, | ||
728 | count, incr, AMDGPU_PTE_VALID); | ||
729 | |||
703 | amdgpu_vm_do_set_ptes(¶ms, last_pde, last_pt, | 730 | amdgpu_vm_do_set_ptes(¶ms, last_pde, last_pt, |
704 | count, incr, AMDGPU_PTE_VALID); | 731 | count, incr, AMDGPU_PTE_VALID); |
732 | } | ||
705 | 733 | ||
706 | if (params.ib->length_dw != 0) { | 734 | if (params.ib->length_dw == 0) { |
707 | amdgpu_ring_pad_ib(ring, params.ib); | 735 | amdgpu_job_free(job); |
708 | amdgpu_sync_resv(adev, &job->sync, pd->tbo.resv, | 736 | return 0; |
737 | } | ||
738 | |||
739 | amdgpu_ring_pad_ib(ring, params.ib); | ||
740 | amdgpu_sync_resv(adev, &job->sync, vm->page_directory->tbo.resv, | ||
741 | AMDGPU_FENCE_OWNER_VM); | ||
742 | if (shadow) | ||
743 | amdgpu_sync_resv(adev, &job->sync, shadow->tbo.resv, | ||
709 | AMDGPU_FENCE_OWNER_VM); | 744 | AMDGPU_FENCE_OWNER_VM); |
710 | WARN_ON(params.ib->length_dw > ndw); | ||
711 | r = amdgpu_job_submit(job, ring, &vm->entity, | ||
712 | AMDGPU_FENCE_OWNER_VM, &fence); | ||
713 | if (r) | ||
714 | goto error_free; | ||
715 | 745 | ||
716 | amdgpu_bo_fence(pd, fence, true); | 746 | WARN_ON(params.ib->length_dw > ndw); |
717 | fence_put(vm->page_directory_fence); | 747 | r = amdgpu_job_submit(job, ring, &vm->entity, |
718 | vm->page_directory_fence = fence_get(fence); | 748 | AMDGPU_FENCE_OWNER_VM, &fence); |
719 | fence_put(fence); | 749 | if (r) |
750 | goto error_free; | ||
720 | 751 | ||
721 | } else { | 752 | amdgpu_bo_fence(vm->page_directory, fence, true); |
722 | amdgpu_job_free(job); | 753 | fence_put(vm->page_directory_fence); |
723 | } | 754 | vm->page_directory_fence = fence_get(fence); |
755 | fence_put(fence); | ||
724 | 756 | ||
725 | return 0; | 757 | return 0; |
726 | 758 | ||
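
Folding the shadow update into the same IB works because entry pt_idx sits at the same byte offset in both copies of the page directory:

    /* pde        = pd_addr     + pt_idx * 8;
     * shadow_pde = shadow_addr + pt_idx * 8;
     *
     * so a coalesced run of "count" PDEs starting at last_pde can be
     * replayed verbatim at last_shadow, and ndw is doubled up front
     * because every run may now be written twice. */
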
@@ -729,29 +761,6 @@ error_free: | |||
729 | return r; | 761 | return r; |
730 | } | 762 | } |
731 | 763 | ||
732 | /* | ||
733 | * amdgpu_vm_update_pdes - make sure that page directory is valid | ||
734 | * | ||
735 | * @adev: amdgpu_device pointer | ||
736 | * @vm: requested vm | ||
737 | * @start: start of GPU address range | ||
738 | * @end: end of GPU address range | ||
739 | * | ||
740 | * Allocates new page tables if necessary | ||
741 | * and updates the page directory. | ||
742 | * Returns 0 for success, error for failure. | ||
743 | */ | ||
744 | int amdgpu_vm_update_page_directory(struct amdgpu_device *adev, | ||
745 | struct amdgpu_vm *vm) | ||
746 | { | ||
747 | int r; | ||
748 | |||
749 | r = amdgpu_vm_update_pd_or_shadow(adev, vm, true); | ||
750 | if (r) | ||
751 | return r; | ||
752 | return amdgpu_vm_update_pd_or_shadow(adev, vm, false); | ||
753 | } | ||
754 | |||
755 | /** | 764 | /** |
756 | * amdgpu_vm_update_ptes - make sure that page tables are valid | 765 | * amdgpu_vm_update_ptes - make sure that page tables are valid |
757 | * | 766 | * |
@@ -781,11 +790,11 @@ static void amdgpu_vm_update_ptes(struct amdgpu_pte_update_params *params, | |||
781 | /* initialize the variables */ | 790 | /* initialize the variables */ |
782 | addr = start; | 791 | addr = start; |
783 | pt_idx = addr >> amdgpu_vm_block_size; | 792 | pt_idx = addr >> amdgpu_vm_block_size; |
784 | pt = vm->page_tables[pt_idx].entry.robj; | 793 | pt = vm->page_tables[pt_idx].bo; |
785 | if (params->shadow) { | 794 | if (params->shadow) { |
786 | if (!pt->shadow) | 795 | if (!pt->shadow) |
787 | return; | 796 | return; |
788 | pt = vm->page_tables[pt_idx].entry.robj->shadow; | 797 | pt = pt->shadow; |
789 | } | 798 | } |
790 | if ((addr & ~mask) == (end & ~mask)) | 799 | if ((addr & ~mask) == (end & ~mask)) |
791 | nptes = end - addr; | 800 | nptes = end - addr; |
@@ -804,11 +813,11 @@ static void amdgpu_vm_update_ptes(struct amdgpu_pte_update_params *params, | |||
804 | /* walk over the address space and update the page tables */ | 813 | /* walk over the address space and update the page tables */ |
805 | while (addr < end) { | 814 | while (addr < end) { |
806 | pt_idx = addr >> amdgpu_vm_block_size; | 815 | pt_idx = addr >> amdgpu_vm_block_size; |
807 | pt = vm->page_tables[pt_idx].entry.robj; | 816 | pt = vm->page_tables[pt_idx].bo; |
808 | if (params->shadow) { | 817 | if (params->shadow) { |
809 | if (!pt->shadow) | 818 | if (!pt->shadow) |
810 | return; | 819 | return; |
811 | pt = vm->page_tables[pt_idx].entry.robj->shadow; | 820 | pt = pt->shadow; |
812 | } | 821 | } |
813 | 822 | ||
814 | if ((addr & ~mask) == (end & ~mask)) | 823 | if ((addr & ~mask) == (end & ~mask)) |
@@ -1065,8 +1074,8 @@ error_free: | |||
1065 | * @pages_addr: DMA addresses to use for mapping | 1074 | * @pages_addr: DMA addresses to use for mapping |
1066 | * @vm: requested vm | 1075 | * @vm: requested vm |
1067 | * @mapping: mapped range and flags to use for the update | 1076 | * @mapping: mapped range and flags to use for the update |
1068 | * @addr: addr to set the area to | ||
1069 | * @flags: HW flags for the mapping | 1077 | * @flags: HW flags for the mapping |
1078 | * @nodes: array of drm_mm_nodes with the MC addresses | ||
1070 | * @fence: optional resulting fence | 1079 | * @fence: optional resulting fence |
1071 | * | 1080 | * |
1072 | * Split the mapping into smaller chunks so that each update fits | 1081 | * Split the mapping into smaller chunks so that each update fits |
@@ -1079,12 +1088,11 @@ static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev, | |||
1079 | dma_addr_t *pages_addr, | 1088 | dma_addr_t *pages_addr, |
1080 | struct amdgpu_vm *vm, | 1089 | struct amdgpu_vm *vm, |
1081 | struct amdgpu_bo_va_mapping *mapping, | 1090 | struct amdgpu_bo_va_mapping *mapping, |
1082 | uint32_t flags, uint64_t addr, | 1091 | uint32_t flags, |
1092 | struct drm_mm_node *nodes, | ||
1083 | struct fence **fence) | 1093 | struct fence **fence) |
1084 | { | 1094 | { |
1085 | const uint64_t max_size = 64ULL * 1024ULL * 1024ULL / AMDGPU_GPU_PAGE_SIZE; | 1095 | uint64_t pfn, src = 0, start = mapping->it.start; |
1086 | |||
1087 | uint64_t src = 0, start = mapping->it.start; | ||
1088 | int r; | 1096 | int r; |
1089 | 1097 | ||
1090 | /* normally, bo_va->flags only contains READABLE and WRITEABLE bits; go here | 1098
@@ -1097,23 +1105,40 @@ static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev, | |||
1097 | 1105 | ||
1098 | trace_amdgpu_vm_bo_update(mapping); | 1106 | trace_amdgpu_vm_bo_update(mapping); |
1099 | 1107 | ||
1100 | if (pages_addr) { | 1108 | pfn = mapping->offset >> PAGE_SHIFT; |
1101 | if (flags == gtt_flags) | 1109 | if (nodes) { |
1102 | src = adev->gart.table_addr + (addr >> 12) * 8; | 1110 | while (pfn >= nodes->size) { |
1103 | addr = 0; | 1111 | pfn -= nodes->size; |
1112 | ++nodes; | ||
1113 | } | ||
1104 | } | 1114 | } |
1105 | addr += mapping->offset; | ||
1106 | 1115 | ||
1107 | if (!pages_addr || src) | 1116 | do { |
1108 | return amdgpu_vm_bo_update_mapping(adev, exclusive, | 1117 | uint64_t max_entries; |
1109 | src, pages_addr, vm, | 1118 | uint64_t addr, last; |
1110 | start, mapping->it.last, | 1119 | |
1111 | flags, addr, fence); | 1120 | if (nodes) { |
1121 | addr = nodes->start << PAGE_SHIFT; | ||
1122 | max_entries = (nodes->size - pfn) * | ||
1123 | (PAGE_SIZE / AMDGPU_GPU_PAGE_SIZE); | ||
1124 | } else { | ||
1125 | addr = 0; | ||
1126 | max_entries = S64_MAX; | ||
1127 | } | ||
1112 | 1128 | ||
1113 | while (start != mapping->it.last + 1) { | 1129 | if (pages_addr) { |
1114 | uint64_t last; | 1130 | if (flags == gtt_flags) |
1131 | src = adev->gart.table_addr + | ||
1132 | (addr >> AMDGPU_GPU_PAGE_SHIFT) * 8; | ||
1133 | else | ||
1134 | max_entries = min(max_entries, 16ull * 1024ull); | ||
1135 | addr = 0; | ||
1136 | } else if (flags & AMDGPU_PTE_VALID) { | ||
1137 | addr += adev->vm_manager.vram_base_offset; | ||
1138 | } | ||
1139 | addr += pfn << PAGE_SHIFT; | ||
1115 | 1140 | ||
1116 | last = min((uint64_t)mapping->it.last, start + max_size - 1); | 1141 | last = min((uint64_t)mapping->it.last, start + max_entries - 1); |
1117 | r = amdgpu_vm_bo_update_mapping(adev, exclusive, | 1142 | r = amdgpu_vm_bo_update_mapping(adev, exclusive, |
1118 | src, pages_addr, vm, | 1143 | src, pages_addr, vm, |
1119 | start, last, flags, addr, | 1144 | start, last, flags, addr, |
@@ -1121,9 +1146,14 @@ static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev, | |||
1121 | if (r) | 1146 | if (r) |
1122 | return r; | 1147 | return r; |
1123 | 1148 | ||
1149 | pfn += last - start + 1; | ||
1150 | if (nodes && nodes->size == pfn) { | ||
1151 | pfn = 0; | ||
1152 | ++nodes; | ||
1153 | } | ||
1124 | start = last + 1; | 1154 | start = last + 1; |
1125 | addr += max_size * AMDGPU_GPU_PAGE_SIZE; | 1155 | |
1126 | } | 1156 | } while (unlikely(start != mapping->it.last + 1)); |
1127 | 1157 | ||
1128 | return 0; | 1158 | return 0; |
1129 | } | 1159 | } |
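
Since split VRAM allocations are backed by an array of drm_mm nodes rather than one linear range, the loop first skips whole nodes until the page offset (pfn) falls inside one. The walk could be isolated into a small helper; amdgpu_find_mm_node is an illustrative name:

    /* Advance through the node array until *pfn lands inside a node;
     * on return *pfn is the remaining offset into that node. */
    static struct drm_mm_node *amdgpu_find_mm_node(struct drm_mm_node *nodes,
                                                   uint64_t *pfn)
    {
            while (*pfn >= nodes->size) {
                    *pfn -= nodes->size;
                    ++nodes;
            }
            return nodes;
    }
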
@@ -1147,40 +1177,30 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev, | |||
1147 | dma_addr_t *pages_addr = NULL; | 1177 | dma_addr_t *pages_addr = NULL; |
1148 | uint32_t gtt_flags, flags; | 1178 | uint32_t gtt_flags, flags; |
1149 | struct ttm_mem_reg *mem; | 1179 | struct ttm_mem_reg *mem; |
1180 | struct drm_mm_node *nodes; | ||
1150 | struct fence *exclusive; | 1181 | struct fence *exclusive; |
1151 | uint64_t addr; | ||
1152 | int r; | 1182 | int r; |
1153 | 1183 | ||
1154 | if (clear) { | 1184 | if (clear) { |
1155 | mem = NULL; | 1185 | mem = NULL; |
1156 | addr = 0; | 1186 | nodes = NULL; |
1157 | exclusive = NULL; | 1187 | exclusive = NULL; |
1158 | } else { | 1188 | } else { |
1159 | struct ttm_dma_tt *ttm; | 1189 | struct ttm_dma_tt *ttm; |
1160 | 1190 | ||
1161 | mem = &bo_va->bo->tbo.mem; | 1191 | mem = &bo_va->bo->tbo.mem; |
1162 | addr = (u64)mem->start << PAGE_SHIFT; | 1192 | nodes = mem->mm_node; |
1163 | switch (mem->mem_type) { | 1193 | if (mem->mem_type == TTM_PL_TT) { |
1164 | case TTM_PL_TT: | ||
1165 | ttm = container_of(bo_va->bo->tbo.ttm, struct | 1194 | ttm = container_of(bo_va->bo->tbo.ttm, struct |
1166 | ttm_dma_tt, ttm); | 1195 | ttm_dma_tt, ttm); |
1167 | pages_addr = ttm->dma_address; | 1196 | pages_addr = ttm->dma_address; |
1168 | break; | ||
1169 | |||
1170 | case TTM_PL_VRAM: | ||
1171 | addr += adev->vm_manager.vram_base_offset; | ||
1172 | break; | ||
1173 | |||
1174 | default: | ||
1175 | break; | ||
1176 | } | 1197 | } |
1177 | |||
1178 | exclusive = reservation_object_get_excl(bo_va->bo->tbo.resv); | 1198 | exclusive = reservation_object_get_excl(bo_va->bo->tbo.resv); |
1179 | } | 1199 | } |
1180 | 1200 | ||
1181 | flags = amdgpu_ttm_tt_pte_flags(adev, bo_va->bo->tbo.ttm, mem); | 1201 | flags = amdgpu_ttm_tt_pte_flags(adev, bo_va->bo->tbo.ttm, mem); |
1182 | gtt_flags = (amdgpu_ttm_is_bound(bo_va->bo->tbo.ttm) && | 1202 | gtt_flags = (amdgpu_ttm_is_bound(bo_va->bo->tbo.ttm) && |
1183 | adev == bo_va->bo->adev) ? flags : 0; | 1203 | adev == amdgpu_ttm_adev(bo_va->bo->tbo.bdev)) ? flags : 0; |
1184 | 1204 | ||
1185 | spin_lock(&vm->status_lock); | 1205 | spin_lock(&vm->status_lock); |
1186 | if (!list_empty(&bo_va->vm_status)) | 1206 | if (!list_empty(&bo_va->vm_status)) |
@@ -1190,7 +1210,7 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev, | |||
1190 | list_for_each_entry(mapping, &bo_va->invalids, list) { | 1210 | list_for_each_entry(mapping, &bo_va->invalids, list) { |
1191 | r = amdgpu_vm_bo_split_mapping(adev, exclusive, | 1211 | r = amdgpu_vm_bo_split_mapping(adev, exclusive, |
1192 | gtt_flags, pages_addr, vm, | 1212 | gtt_flags, pages_addr, vm, |
1193 | mapping, flags, addr, | 1213 | mapping, flags, nodes, |
1194 | &bo_va->last_pt_update); | 1214 | &bo_va->last_pt_update); |
1195 | if (r) | 1215 | if (r) |
1196 | return r; | 1216 | return r; |
@@ -1405,18 +1425,17 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev, | |||
1405 | /* walk over the address space and allocate the page tables */ | 1425 | /* walk over the address space and allocate the page tables */ |
1406 | for (pt_idx = saddr; pt_idx <= eaddr; ++pt_idx) { | 1426 | for (pt_idx = saddr; pt_idx <= eaddr; ++pt_idx) { |
1407 | struct reservation_object *resv = vm->page_directory->tbo.resv; | 1427 | struct reservation_object *resv = vm->page_directory->tbo.resv; |
1408 | struct amdgpu_bo_list_entry *entry; | ||
1409 | struct amdgpu_bo *pt; | 1428 | struct amdgpu_bo *pt; |
1410 | 1429 | ||
1411 | entry = &vm->page_tables[pt_idx].entry; | 1430 | if (vm->page_tables[pt_idx].bo) |
1412 | if (entry->robj) | ||
1413 | continue; | 1431 | continue; |
1414 | 1432 | ||
1415 | r = amdgpu_bo_create(adev, AMDGPU_VM_PTE_COUNT * 8, | 1433 | r = amdgpu_bo_create(adev, AMDGPU_VM_PTE_COUNT * 8, |
1416 | AMDGPU_GPU_PAGE_SIZE, true, | 1434 | AMDGPU_GPU_PAGE_SIZE, true, |
1417 | AMDGPU_GEM_DOMAIN_VRAM, | 1435 | AMDGPU_GEM_DOMAIN_VRAM, |
1418 | AMDGPU_GEM_CREATE_NO_CPU_ACCESS | | 1436 | AMDGPU_GEM_CREATE_NO_CPU_ACCESS | |
1419 | AMDGPU_GEM_CREATE_SHADOW, | 1437 | AMDGPU_GEM_CREATE_SHADOW | |
1438 | AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS, | ||
1420 | NULL, resv, &pt); | 1439 | NULL, resv, &pt); |
1421 | if (r) | 1440 | if (r) |
1422 | goto error_free; | 1441 | goto error_free; |
@@ -1442,11 +1461,7 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev, | |||
1442 | } | 1461 | } |
1443 | } | 1462 | } |
1444 | 1463 | ||
1445 | entry->robj = pt; | 1464 | vm->page_tables[pt_idx].bo = pt; |
1446 | entry->priority = 0; | ||
1447 | entry->tv.bo = &entry->robj->tbo; | ||
1448 | entry->tv.shared = true; | ||
1449 | entry->user_pages = NULL; | ||
1450 | vm->page_tables[pt_idx].addr = 0; | 1465 | vm->page_tables[pt_idx].addr = 0; |
1451 | } | 1466 | } |
1452 | 1467 | ||
@@ -1626,7 +1641,8 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm) | |||
1626 | r = amdgpu_bo_create(adev, pd_size, align, true, | 1641 | r = amdgpu_bo_create(adev, pd_size, align, true, |
1627 | AMDGPU_GEM_DOMAIN_VRAM, | 1642 | AMDGPU_GEM_DOMAIN_VRAM, |
1628 | AMDGPU_GEM_CREATE_NO_CPU_ACCESS | | 1643 | AMDGPU_GEM_CREATE_NO_CPU_ACCESS | |
1629 | AMDGPU_GEM_CREATE_SHADOW, | 1644 | AMDGPU_GEM_CREATE_SHADOW | |
1645 | AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS, | ||
1630 | NULL, NULL, &vm->page_directory); | 1646 | NULL, NULL, &vm->page_directory); |
1631 | if (r) | 1647 | if (r) |
1632 | goto error_free_sched_entity; | 1648 | goto error_free_sched_entity; |
@@ -1697,7 +1713,7 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm) | |||
1697 | } | 1713 | } |
1698 | 1714 | ||
1699 | for (i = 0; i < amdgpu_vm_num_pdes(adev); i++) { | 1715 | for (i = 0; i < amdgpu_vm_num_pdes(adev); i++) { |
1700 | struct amdgpu_bo *pt = vm->page_tables[i].entry.robj; | 1716 | struct amdgpu_bo *pt = vm->page_tables[i].bo; |
1701 | 1717 | ||
1702 | if (!pt) | 1718 | if (!pt) |
1703 | continue; | 1719 | continue; |
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h new file mode 100644 index 000000000000..42a629b56095 --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h | |||
@@ -0,0 +1,205 @@ | |||
1 | /* | ||
2 | * Copyright 2016 Advanced Micro Devices, Inc. | ||
3 | * | ||
4 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
5 | * copy of this software and associated documentation files (the "Software"), | ||
6 | * to deal in the Software without restriction, including without limitation | ||
7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
8 | * and/or sell copies of the Software, and to permit persons to whom the | ||
9 | * Software is furnished to do so, subject to the following conditions: | ||
10 | * | ||
11 | * The above copyright notice and this permission notice shall be included in | ||
12 | * all copies or substantial portions of the Software. | ||
13 | * | ||
14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
17 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR | ||
18 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | ||
19 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | ||
20 | * OTHER DEALINGS IN THE SOFTWARE. | ||
21 | * | ||
22 | * Authors: Christian König | ||
23 | */ | ||
24 | #ifndef __AMDGPU_VM_H__ | ||
25 | #define __AMDGPU_VM_H__ | ||
26 | |||
27 | #include <linux/rbtree.h> | ||
28 | |||
29 | #include "gpu_scheduler.h" | ||
30 | #include "amdgpu_sync.h" | ||
31 | #include "amdgpu_ring.h" | ||
32 | |||
33 | struct amdgpu_bo_va; | ||
34 | struct amdgpu_job; | ||
35 | struct amdgpu_bo_list_entry; | ||
36 | |||
37 | /* | ||
38 | * GPUVM handling | ||
39 | */ | ||
40 | |||
41 | /* maximum number of VMIDs */ | ||
42 | #define AMDGPU_NUM_VM 16 | ||
43 | |||
44 | /* Maximum number of PTEs the hardware can write with one command */ | ||
45 | #define AMDGPU_VM_MAX_UPDATE_SIZE 0x3FFFF | ||
46 | |||
47 | /* number of entries in page table */ | ||
48 | #define AMDGPU_VM_PTE_COUNT (1 << amdgpu_vm_block_size) | ||
49 | |||
50 | /* PTBs (Page Table Blocks) need to be aligned to 32K */ | ||
51 | #define AMDGPU_VM_PTB_ALIGN_SIZE 32768 | ||
52 | |||
53 | /* LOG2 number of contiguous pages for the fragment field */ | ||
54 | #define AMDGPU_LOG2_PAGES_PER_FRAG 4 | ||
55 | |||
56 | #define AMDGPU_PTE_VALID (1 << 0) | ||
57 | #define AMDGPU_PTE_SYSTEM (1 << 1) | ||
58 | #define AMDGPU_PTE_SNOOPED (1 << 2) | ||
59 | |||
60 | /* VI only */ | ||
61 | #define AMDGPU_PTE_EXECUTABLE (1 << 4) | ||
62 | |||
63 | #define AMDGPU_PTE_READABLE (1 << 5) | ||
64 | #define AMDGPU_PTE_WRITEABLE (1 << 6) | ||
65 | |||
66 | #define AMDGPU_PTE_FRAG(x) ((x & 0x1f) << 7) | ||
67 | |||
68 | /* How to program VM fault handling */ | ||
69 | #define AMDGPU_VM_FAULT_STOP_NEVER 0 | ||
70 | #define AMDGPU_VM_FAULT_STOP_FIRST 1 | ||
71 | #define AMDGPU_VM_FAULT_STOP_ALWAYS 2 | ||
72 | |||
73 | struct amdgpu_vm_pt { | ||
74 | struct amdgpu_bo *bo; | ||
75 | uint64_t addr; | ||
76 | }; | ||
77 | |||
78 | struct amdgpu_vm { | ||
79 | /* tree of virtual addresses mapped */ | ||
80 | struct rb_root va; | ||
81 | |||
82 | /* protecting invalidated */ | ||
83 | spinlock_t status_lock; | ||
84 | |||
85 | /* BOs moved, but not yet updated in the PT */ | ||
86 | struct list_head invalidated; | ||
87 | |||
88 | /* BOs cleared in the PT because of a move */ | ||
89 | struct list_head cleared; | ||
90 | |||
91 | /* BO mappings freed, but not yet updated in the PT */ | ||
92 | struct list_head freed; | ||
93 | |||
94 | /* contains the page directory */ | ||
95 | struct amdgpu_bo *page_directory; | ||
96 | unsigned max_pde_used; | ||
97 | struct fence *page_directory_fence; | ||
98 | uint64_t last_eviction_counter; | ||
99 | |||
100 | /* array of page tables, one for each page directory entry */ | ||
101 | struct amdgpu_vm_pt *page_tables; | ||
102 | |||
103 | /* for id and flush management per ring */ | ||
104 | struct amdgpu_vm_id *ids[AMDGPU_MAX_RINGS]; | ||
105 | |||
106 | /* protecting freed */ | ||
107 | spinlock_t freed_lock; | ||
108 | |||
109 | /* Scheduler entity for page table updates */ | ||
110 | struct amd_sched_entity entity; | ||
111 | |||
112 | /* client id */ | ||
113 | u64 client_id; | ||
114 | }; | ||
115 | |||
116 | struct amdgpu_vm_id { | ||
117 | struct list_head list; | ||
118 | struct fence *first; | ||
119 | struct amdgpu_sync active; | ||
120 | struct fence *last_flush; | ||
121 | atomic64_t owner; | ||
122 | |||
123 | uint64_t pd_gpu_addr; | ||
124 | /* last flushed PD/PT update */ | ||
125 | struct fence *flushed_updates; | ||
126 | |||
127 | uint32_t current_gpu_reset_count; | ||
128 | |||
129 | uint32_t gds_base; | ||
130 | uint32_t gds_size; | ||
131 | uint32_t gws_base; | ||
132 | uint32_t gws_size; | ||
133 | uint32_t oa_base; | ||
134 | uint32_t oa_size; | ||
135 | }; | ||
136 | |||
137 | struct amdgpu_vm_manager { | ||
138 | /* Handling of VMIDs */ | ||
139 | struct mutex lock; | ||
140 | unsigned num_ids; | ||
141 | struct list_head ids_lru; | ||
142 | struct amdgpu_vm_id ids[AMDGPU_NUM_VM]; | ||
143 | |||
144 | /* Handling of VM fences */ | ||
145 | u64 fence_context; | ||
146 | unsigned seqno[AMDGPU_MAX_RINGS]; | ||
147 | |||
148 | uint32_t max_pfn; | ||
149 | /* vram base address for page table entry */ | ||
150 | u64 vram_base_offset; | ||
151 | /* is vm enabled? */ | ||
152 | bool enabled; | ||
153 | /* vm pte handling */ | ||
154 | const struct amdgpu_vm_pte_funcs *vm_pte_funcs; | ||
155 | struct amdgpu_ring *vm_pte_rings[AMDGPU_MAX_RINGS]; | ||
156 | unsigned vm_pte_num_rings; | ||
157 | atomic_t vm_pte_next_ring; | ||
158 | /* client id counter */ | ||
159 | atomic64_t client_counter; | ||
160 | }; | ||
161 | |||
162 | void amdgpu_vm_manager_init(struct amdgpu_device *adev); | ||
163 | void amdgpu_vm_manager_fini(struct amdgpu_device *adev); | ||
164 | int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm); | ||
165 | void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm); | ||
166 | void amdgpu_vm_get_pd_bo(struct amdgpu_vm *vm, | ||
167 | struct list_head *validated, | ||
168 | struct amdgpu_bo_list_entry *entry); | ||
169 | int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm, | ||
170 | int (*callback)(void *p, struct amdgpu_bo *bo), | ||
171 | void *param); | ||
172 | void amdgpu_vm_move_pt_bos_in_lru(struct amdgpu_device *adev, | ||
173 | struct amdgpu_vm *vm); | ||
174 | int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring, | ||
175 | struct amdgpu_sync *sync, struct fence *fence, | ||
176 | struct amdgpu_job *job); | ||
177 | int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job); | ||
178 | void amdgpu_vm_reset_id(struct amdgpu_device *adev, unsigned vm_id); | ||
179 | int amdgpu_vm_update_page_directory(struct amdgpu_device *adev, | ||
180 | struct amdgpu_vm *vm); | ||
181 | int amdgpu_vm_clear_freed(struct amdgpu_device *adev, | ||
182 | struct amdgpu_vm *vm); | ||
183 | int amdgpu_vm_clear_invalids(struct amdgpu_device *adev, struct amdgpu_vm *vm, | ||
184 | struct amdgpu_sync *sync); | ||
185 | int amdgpu_vm_bo_update(struct amdgpu_device *adev, | ||
186 | struct amdgpu_bo_va *bo_va, | ||
187 | bool clear); | ||
188 | void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev, | ||
189 | struct amdgpu_bo *bo); | ||
190 | struct amdgpu_bo_va *amdgpu_vm_bo_find(struct amdgpu_vm *vm, | ||
191 | struct amdgpu_bo *bo); | ||
192 | struct amdgpu_bo_va *amdgpu_vm_bo_add(struct amdgpu_device *adev, | ||
193 | struct amdgpu_vm *vm, | ||
194 | struct amdgpu_bo *bo); | ||
195 | int amdgpu_vm_bo_map(struct amdgpu_device *adev, | ||
196 | struct amdgpu_bo_va *bo_va, | ||
197 | uint64_t addr, uint64_t offset, | ||
198 | uint64_t size, uint32_t flags); | ||
199 | int amdgpu_vm_bo_unmap(struct amdgpu_device *adev, | ||
200 | struct amdgpu_bo_va *bo_va, | ||
201 | uint64_t addr); | ||
202 | void amdgpu_vm_bo_rmv(struct amdgpu_device *adev, | ||
203 | struct amdgpu_bo_va *bo_va); | ||
204 | |||
205 | #endif | ||
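
For reference, the AMDGPU_PTE_FRAG() field declared in this header stores a log2 page count in PTE bits 7..11. With AMDGPU_LOG2_PAGES_PER_FRAG = 4, a fully aligned 64 KiB range (16 pages of 4 KiB) would carry:

    AMDGPU_PTE_FRAG(4) == (4 & 0x1f) << 7 == 0x200
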
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c new file mode 100644 index 000000000000..180eed7c8bca --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c | |||
@@ -0,0 +1,222 @@ | |||
1 | /* | ||
2 | * Copyright 2016 Advanced Micro Devices, Inc. | ||
3 | * | ||
4 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
5 | * copy of this software and associated documentation files (the "Software"), | ||
6 | * to deal in the Software without restriction, including without limitation | ||
7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
8 | * and/or sell copies of the Software, and to permit persons to whom the | ||
9 | * Software is furnished to do so, subject to the following conditions: | ||
10 | * | ||
11 | * The above copyright notice and this permission notice shall be included in | ||
12 | * all copies or substantial portions of the Software. | ||
13 | * | ||
14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
17 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR | ||
18 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | ||
19 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | ||
20 | * OTHER DEALINGS IN THE SOFTWARE. | ||
21 | * | ||
22 | * Authors: Christian König | ||
23 | */ | ||
24 | |||
25 | #include <drm/drmP.h> | ||
26 | #include "amdgpu.h" | ||
27 | |||
28 | struct amdgpu_vram_mgr { | ||
29 | struct drm_mm mm; | ||
30 | spinlock_t lock; | ||
31 | }; | ||
32 | |||
33 | /** | ||
34 | * amdgpu_vram_mgr_init - init VRAM manager and DRM MM | ||
35 | * | ||
36 | * @man: TTM memory type manager | ||
37 | * @p_size: maximum size of VRAM, in pages | ||
38 | * | ||
39 | * Allocate and initialize the VRAM manager. | ||
40 | */ | ||
41 | static int amdgpu_vram_mgr_init(struct ttm_mem_type_manager *man, | ||
42 | unsigned long p_size) | ||
43 | { | ||
44 | struct amdgpu_vram_mgr *mgr; | ||
45 | |||
46 | mgr = kzalloc(sizeof(*mgr), GFP_KERNEL); | ||
47 | if (!mgr) | ||
48 | return -ENOMEM; | ||
49 | |||
50 | drm_mm_init(&mgr->mm, 0, p_size); | ||
51 | spin_lock_init(&mgr->lock); | ||
52 | man->priv = mgr; | ||
53 | return 0; | ||
54 | } | ||
55 | |||
56 | /** | ||
57 | * amdgpu_vram_mgr_fini - free and destroy VRAM manager | ||
58 | * | ||
59 | * @man: TTM memory type manager | ||
60 | * | ||
61 | * Destroy and free the VRAM manager, returns -EBUSY if ranges are still | ||
62 | * allocated inside it. | ||
63 | */ | ||
64 | static int amdgpu_vram_mgr_fini(struct ttm_mem_type_manager *man) | ||
65 | { | ||
66 | struct amdgpu_vram_mgr *mgr = man->priv; | ||
67 | |||
68 | spin_lock(&mgr->lock); | ||
69 | if (!drm_mm_clean(&mgr->mm)) { | ||
70 | spin_unlock(&mgr->lock); | ||
71 | return -EBUSY; | ||
72 | } | ||
73 | |||
74 | drm_mm_takedown(&mgr->mm); | ||
75 | spin_unlock(&mgr->lock); | ||
76 | kfree(mgr); | ||
77 | man->priv = NULL; | ||
78 | return 0; | ||
79 | } | ||
80 | |||
81 | /** | ||
82 | * amdgpu_vram_mgr_new - allocate new ranges | ||
83 | * | ||
84 | * @man: TTM memory type manager | ||
85 | * @tbo: TTM BO we need this range for | ||
86 | * @place: placement flags and restrictions | ||
87 | * @mem: the resulting mem object | ||
88 | * | ||
89 | * Allocate VRAM for the given BO. | ||
90 | */ | ||
91 | static int amdgpu_vram_mgr_new(struct ttm_mem_type_manager *man, | ||
92 | struct ttm_buffer_object *tbo, | ||
93 | const struct ttm_place *place, | ||
94 | struct ttm_mem_reg *mem) | ||
95 | { | ||
96 | struct amdgpu_bo *bo = container_of(tbo, struct amdgpu_bo, tbo); | ||
97 | struct amdgpu_vram_mgr *mgr = man->priv; | ||
98 | struct drm_mm *mm = &mgr->mm; | ||
99 | struct drm_mm_node *nodes; | ||
100 | enum drm_mm_search_flags sflags = DRM_MM_SEARCH_DEFAULT; | ||
101 | enum drm_mm_allocator_flags aflags = DRM_MM_CREATE_DEFAULT; | ||
102 | unsigned long lpfn, num_nodes, pages_per_node, pages_left; | ||
103 | unsigned i; | ||
104 | int r; | ||
105 | |||
106 | lpfn = place->lpfn; | ||
107 | if (!lpfn) | ||
108 | lpfn = man->size; | ||
109 | |||
110 | if (bo->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS || | ||
111 | amdgpu_vram_page_split == -1) { | ||
112 | pages_per_node = ~0ul; | ||
113 | num_nodes = 1; | ||
114 | } else { | ||
115 | pages_per_node = max((uint32_t)amdgpu_vram_page_split, | ||
116 | mem->page_alignment); | ||
117 | num_nodes = DIV_ROUND_UP(mem->num_pages, pages_per_node); | ||
118 | } | ||
119 | |||
120 | nodes = kcalloc(num_nodes, sizeof(*nodes), GFP_KERNEL); | ||
121 | if (!nodes) | ||
122 | return -ENOMEM; | ||
123 | |||
124 | if (place->flags & TTM_PL_FLAG_TOPDOWN) { | ||
125 | sflags = DRM_MM_SEARCH_BELOW; | ||
126 | aflags = DRM_MM_CREATE_TOP; | ||
127 | } | ||
128 | |||
129 | pages_left = mem->num_pages; | ||
130 | |||
131 | spin_lock(&mgr->lock); | ||
132 | for (i = 0; i < num_nodes; ++i) { | ||
133 | unsigned long pages = min(pages_left, pages_per_node); | ||
134 | uint32_t alignment = mem->page_alignment; | ||
135 | |||
136 | if (pages == pages_per_node) | ||
137 | alignment = pages_per_node; | ||
138 | else | ||
139 | sflags |= DRM_MM_SEARCH_BEST; | ||
140 | |||
141 | r = drm_mm_insert_node_in_range_generic(mm, &nodes[i], pages, | ||
142 | alignment, 0, | ||
143 | place->fpfn, lpfn, | ||
144 | sflags, aflags); | ||
145 | if (unlikely(r)) | ||
146 | goto error; | ||
147 | |||
148 | pages_left -= pages; | ||
149 | } | ||
150 | spin_unlock(&mgr->lock); | ||
151 | |||
152 | mem->start = num_nodes == 1 ? nodes[0].start : AMDGPU_BO_INVALID_OFFSET; | ||
153 | mem->mm_node = nodes; | ||
154 | |||
155 | return 0; | ||
156 | |||
157 | error: | ||
158 | while (i--) | ||
159 | drm_mm_remove_node(&nodes[i]); | ||
160 | spin_unlock(&mgr->lock); | ||
161 | |||
162 | kfree(nodes); | ||
163 | return r == -ENOSPC ? 0 : r; | ||
164 | } | ||
165 | |||
166 | /** | ||
167 | * amdgpu_vram_mgr_del - free ranges | ||
168 | * | ||
169 | * @man: TTM memory type manager | ||
172 | * @mem: TTM memory object | ||
173 | * | ||
174 | * Free the VRAM pages backing @mem. | ||
175 | */ | ||
176 | static void amdgpu_vram_mgr_del(struct ttm_mem_type_manager *man, | ||
177 | struct ttm_mem_reg *mem) | ||
178 | { | ||
179 | struct amdgpu_vram_mgr *mgr = man->priv; | ||
180 | struct drm_mm_node *nodes = mem->mm_node; | ||
181 | unsigned pages = mem->num_pages; | ||
182 | |||
183 | if (!mem->mm_node) | ||
184 | return; | ||
185 | |||
186 | spin_lock(&mgr->lock); | ||
187 | while (pages) { | ||
188 | pages -= nodes->size; | ||
189 | drm_mm_remove_node(nodes); | ||
190 | ++nodes; | ||
191 | } | ||
192 | spin_unlock(&mgr->lock); | ||
193 | |||
194 | kfree(mem->mm_node); | ||
195 | mem->mm_node = NULL; | ||
196 | } | ||
197 | |||
198 | /** | ||
199 | * amdgpu_vram_mgr_debug - dump VRAM table | ||
200 | * | ||
201 | * @man: TTM memory type manager | ||
202 | * @prefix: text prefix | ||
203 | * | ||
204 | * Dump the table content using printk. | ||
205 | */ | ||
206 | static void amdgpu_vram_mgr_debug(struct ttm_mem_type_manager *man, | ||
207 | const char *prefix) | ||
208 | { | ||
209 | struct amdgpu_vram_mgr *mgr = man->priv; | ||
210 | |||
211 | spin_lock(&mgr->lock); | ||
212 | drm_mm_debug_table(&mgr->mm, prefix); | ||
213 | spin_unlock(&mgr->lock); | ||
214 | } | ||
215 | |||
216 | const struct ttm_mem_type_manager_func amdgpu_vram_mgr_func = { | ||
217 | .init = amdgpu_vram_mgr_init, | ||
218 | .takedown = amdgpu_vram_mgr_fini, | ||
219 | .get_node = amdgpu_vram_mgr_new, | ||
220 | .put_node = amdgpu_vram_mgr_del, | ||
221 | .debug = amdgpu_vram_mgr_debug | ||
222 | }; | ||
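The interesting policy in the new file is in amdgpu_vram_mgr_new(): unless the BO demands contiguous VRAM (or amdgpu_vram_page_split is -1), the request is split into DIV_ROUND_UP(num_pages, pages_per_node) drm_mm nodes. Full-sized chunks are aligned to the chunk size, the final partial chunk falls back to best-fit search, and mem->start is only a real offset when a single node results; otherwise it becomes AMDGPU_BO_INVALID_OFFSET. A standalone sketch of just the splitting arithmetic; the PAGES_PER_NODE value and the printouts are illustrative, not driver code.

#include <stdio.h>

#define PAGES_PER_NODE 512UL	/* hypothetical amdgpu_vram_page_split value */

int main(void)
{
	unsigned long num_pages = 1300;	/* pages requested for the BO */
	unsigned long num_nodes = (num_pages + PAGES_PER_NODE - 1) /
				  PAGES_PER_NODE;
	unsigned long pages_left = num_pages;
	unsigned long i;

	for (i = 0; i < num_nodes; ++i) {
		unsigned long pages = pages_left < PAGES_PER_NODE ?
				      pages_left : PAGES_PER_NODE;

		/*
		 * Full chunks are aligned to the chunk size; the short
		 * tail would instead use best-fit search
		 * (DRM_MM_SEARCH_BEST in the real code).
		 */
		printf("node %lu: %lu pages (%s)\n", i, pages,
		       pages == PAGES_PER_NODE ? "aligned" : "best-fit");
		pages_left -= pages;
	}
	return 0;
}

Splitting a large BO into fixed-size chunks this way trades a contiguous offset for much lower fragmentation pressure on the VRAM heap, which is why contiguous placement has to be requested explicitly via AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS.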
diff --git a/drivers/gpu/drm/amd/amdgpu/atombios_crtc.c b/drivers/gpu/drm/amd/amdgpu/atombios_crtc.c index f7d236f95e74..8c9bc75a9c2d 100644 --- a/drivers/gpu/drm/amd/amdgpu/atombios_crtc.c +++ b/drivers/gpu/drm/amd/amdgpu/atombios_crtc.c | |||
@@ -31,6 +31,7 @@ | |||
31 | #include "atom.h" | 31 | #include "atom.h" |
32 | #include "atom-bits.h" | 32 | #include "atom-bits.h" |
33 | #include "atombios_encoders.h" | 33 | #include "atombios_encoders.h" |
34 | #include "atombios_crtc.h" | ||
34 | #include "amdgpu_atombios.h" | 35 | #include "amdgpu_atombios.h" |
35 | #include "amdgpu_pll.h" | 36 | #include "amdgpu_pll.h" |
36 | #include "amdgpu_connectors.h" | 37 | #include "amdgpu_connectors.h" |
diff --git a/drivers/gpu/drm/amd/amdgpu/ci_dpm.c b/drivers/gpu/drm/amd/amdgpu/ci_dpm.c index 1d8c375a3561..e9b1964d4e61 100644 --- a/drivers/gpu/drm/amd/amdgpu/ci_dpm.c +++ b/drivers/gpu/drm/amd/amdgpu/ci_dpm.c | |||
@@ -887,9 +887,6 @@ static void ci_dpm_powergate_uvd(struct amdgpu_device *adev, bool gate) | |||
887 | { | 887 | { |
888 | struct ci_power_info *pi = ci_get_pi(adev); | 888 | struct ci_power_info *pi = ci_get_pi(adev); |
889 | 889 | ||
890 | if (pi->uvd_power_gated == gate) | ||
891 | return; | ||
892 | |||
893 | pi->uvd_power_gated = gate; | 890 | pi->uvd_power_gated = gate; |
894 | 891 | ||
895 | ci_update_uvd_dpm(adev, gate); | 892 | ci_update_uvd_dpm(adev, gate); |
@@ -960,6 +957,12 @@ static void ci_apply_state_adjust_rules(struct amdgpu_device *adev, | |||
960 | sclk = ps->performance_levels[0].sclk; | 957 | sclk = ps->performance_levels[0].sclk; |
961 | } | 958 | } |
962 | 959 | ||
960 | if (adev->pm.pm_display_cfg.min_core_set_clock > sclk) | ||
961 | sclk = adev->pm.pm_display_cfg.min_core_set_clock; | ||
962 | |||
963 | if (adev->pm.pm_display_cfg.min_mem_set_clock > mclk) | ||
964 | mclk = adev->pm.pm_display_cfg.min_mem_set_clock; | ||
965 | |||
963 | if (rps->vce_active) { | 966 | if (rps->vce_active) { |
964 | if (sclk < adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].sclk) | 967 | if (sclk < adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].sclk) |
965 | sclk = adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].sclk; | 968 | sclk = adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].sclk; |
@@ -2201,6 +2204,11 @@ static int ci_upload_firmware(struct amdgpu_device *adev) | |||
2201 | struct ci_power_info *pi = ci_get_pi(adev); | 2204 | struct ci_power_info *pi = ci_get_pi(adev); |
2202 | int i, ret; | 2205 | int i, ret; |
2203 | 2206 | ||
2207 | if (amdgpu_ci_is_smc_running(adev)) { | ||
2208 | DRM_INFO("smc is running, no need to load smc firmware\n"); | ||
2209 | return 0; | ||
2210 | } | ||
2211 | |||
2204 | for (i = 0; i < adev->usec_timeout; i++) { | 2212 | for (i = 0; i < adev->usec_timeout; i++) { |
2205 | if (RREG32_SMC(ixRCU_UC_EVENTS) & RCU_UC_EVENTS__boot_seq_done_MASK) | 2213 | if (RREG32_SMC(ixRCU_UC_EVENTS) & RCU_UC_EVENTS__boot_seq_done_MASK) |
2206 | break; | 2214 | break; |
@@ -4190,8 +4198,15 @@ static int ci_update_uvd_dpm(struct amdgpu_device *adev, bool gate) | |||
4190 | { | 4198 | { |
4191 | struct ci_power_info *pi = ci_get_pi(adev); | 4199 | struct ci_power_info *pi = ci_get_pi(adev); |
4192 | u32 tmp; | 4200 | u32 tmp; |
4201 | int ret = 0; | ||
4193 | 4202 | ||
4194 | if (!gate) { | 4203 | if (!gate) { |
4204 | /* turn the clocks on when decoding */ | ||
4205 | ret = amdgpu_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_UVD, | ||
4206 | AMD_CG_STATE_UNGATE); | ||
4207 | if (ret) | ||
4208 | return ret; | ||
4209 | |||
4195 | if (pi->caps_uvd_dpm || | 4210 | if (pi->caps_uvd_dpm || |
4196 | (adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.count <= 0)) | 4211 | (adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.count <= 0)) |
4197 | pi->smc_state_table.UvdBootLevel = 0; | 4212 | pi->smc_state_table.UvdBootLevel = 0; |
@@ -4203,9 +4218,17 @@ static int ci_update_uvd_dpm(struct amdgpu_device *adev, bool gate) | |||
4203 | tmp &= ~DPM_TABLE_475__UvdBootLevel_MASK; | 4218 | tmp &= ~DPM_TABLE_475__UvdBootLevel_MASK; |
4204 | tmp |= (pi->smc_state_table.UvdBootLevel << DPM_TABLE_475__UvdBootLevel__SHIFT); | 4219 | tmp |= (pi->smc_state_table.UvdBootLevel << DPM_TABLE_475__UvdBootLevel__SHIFT); |
4205 | WREG32_SMC(ixDPM_TABLE_475, tmp); | 4220 | WREG32_SMC(ixDPM_TABLE_475, tmp); |
4221 | ret = ci_enable_uvd_dpm(adev, true); | ||
4222 | } else { | ||
4223 | ret = ci_enable_uvd_dpm(adev, false); | ||
4224 | if (ret) | ||
4225 | return ret; | ||
4226 | |||
4227 | ret = amdgpu_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_UVD, | ||
4228 | AMD_CG_STATE_GATE); | ||
4206 | } | 4229 | } |
4207 | 4230 | ||
4208 | return ci_enable_uvd_dpm(adev, !gate); | 4231 | return ret; |
4209 | } | 4232 | } |
4210 | 4233 | ||
4211 | static u8 ci_get_vce_boot_level(struct amdgpu_device *adev) | 4234 | static u8 ci_get_vce_boot_level(struct amdgpu_device *adev) |
@@ -4247,13 +4270,12 @@ static int ci_update_vce_dpm(struct amdgpu_device *adev, | |||
4247 | 4270 | ||
4248 | ret = ci_enable_vce_dpm(adev, true); | 4271 | ret = ci_enable_vce_dpm(adev, true); |
4249 | } else { | 4272 | } else { |
4273 | ret = ci_enable_vce_dpm(adev, false); | ||
4274 | if (ret) | ||
4275 | return ret; | ||
4250 | /* turn the clocks off when not encoding */ | 4276 | /* turn the clocks off when not encoding */ |
4251 | ret = amdgpu_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_VCE, | 4277 | ret = amdgpu_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_VCE, |
4252 | AMD_CG_STATE_GATE); | 4278 | AMD_CG_STATE_GATE); |
4253 | if (ret) | ||
4254 | return ret; | ||
4255 | |||
4256 | ret = ci_enable_vce_dpm(adev, false); | ||
4257 | } | 4279 | } |
4258 | } | 4280 | } |
4259 | return ret; | 4281 | return ret; |
@@ -5219,6 +5241,7 @@ static void ci_update_current_ps(struct amdgpu_device *adev, | |||
5219 | pi->current_rps = *rps; | 5241 | pi->current_rps = *rps; |
5220 | pi->current_ps = *new_ps; | 5242 | pi->current_ps = *new_ps; |
5221 | pi->current_rps.ps_priv = &pi->current_ps; | 5243 | pi->current_rps.ps_priv = &pi->current_ps; |
5244 | adev->pm.dpm.current_ps = &pi->current_rps; | ||
5222 | } | 5245 | } |
5223 | 5246 | ||
5224 | static void ci_update_requested_ps(struct amdgpu_device *adev, | 5247 | static void ci_update_requested_ps(struct amdgpu_device *adev, |
@@ -5230,6 +5253,7 @@ static void ci_update_requested_ps(struct amdgpu_device *adev, | |||
5230 | pi->requested_rps = *rps; | 5253 | pi->requested_rps = *rps; |
5231 | pi->requested_ps = *new_ps; | 5254 | pi->requested_ps = *new_ps; |
5232 | pi->requested_rps.ps_priv = &pi->requested_ps; | 5255 | pi->requested_rps.ps_priv = &pi->requested_ps; |
5256 | adev->pm.dpm.requested_ps = &pi->requested_rps; | ||
5233 | } | 5257 | } |
5234 | 5258 | ||
5235 | static int ci_dpm_pre_set_power_state(struct amdgpu_device *adev) | 5259 | static int ci_dpm_pre_set_power_state(struct amdgpu_device *adev) |
@@ -5267,8 +5291,6 @@ static int ci_dpm_enable(struct amdgpu_device *adev) | |||
5267 | struct amdgpu_ps *boot_ps = adev->pm.dpm.boot_ps; | 5291 | struct amdgpu_ps *boot_ps = adev->pm.dpm.boot_ps; |
5268 | int ret; | 5292 | int ret; |
5269 | 5293 | ||
5270 | if (amdgpu_ci_is_smc_running(adev)) | ||
5271 | return -EINVAL; | ||
5272 | if (pi->voltage_control != CISLANDS_VOLTAGE_CONTROL_NONE) { | 5294 | if (pi->voltage_control != CISLANDS_VOLTAGE_CONTROL_NONE) { |
5273 | ci_enable_voltage_control(adev); | 5295 | ci_enable_voltage_control(adev); |
5274 | ret = ci_construct_voltage_tables(adev); | 5296 | ret = ci_construct_voltage_tables(adev); |
@@ -5689,7 +5711,7 @@ static int ci_parse_power_table(struct amdgpu_device *adev) | |||
5689 | adev->pm.dpm.num_ps = state_array->ucNumEntries; | 5711 | adev->pm.dpm.num_ps = state_array->ucNumEntries; |
5690 | 5712 | ||
5691 | /* fill in the vce power states */ | 5713 | /* fill in the vce power states */ |
5692 | for (i = 0; i < AMDGPU_MAX_VCE_LEVELS; i++) { | 5714 | for (i = 0; i < adev->pm.dpm.num_of_vce_states; i++) { |
5693 | u32 sclk, mclk; | 5715 | u32 sclk, mclk; |
5694 | clock_array_index = adev->pm.dpm.vce_states[i].clk_idx; | 5716 | clock_array_index = adev->pm.dpm.vce_states[i].clk_idx; |
5695 | clock_info = (union pplib_clock_info *) | 5717 | clock_info = (union pplib_clock_info *) |
@@ -6094,6 +6116,56 @@ static void ci_dpm_print_power_state(struct amdgpu_device *adev, | |||
6094 | amdgpu_dpm_print_ps_status(adev, rps); | 6116 | amdgpu_dpm_print_ps_status(adev, rps); |
6095 | } | 6117 | } |
6096 | 6118 | ||
6119 | static inline bool ci_are_power_levels_equal(const struct ci_pl *ci_cpl1, | ||
6120 | const struct ci_pl *ci_cpl2) | ||
6121 | { | ||
6122 | return ((ci_cpl1->mclk == ci_cpl2->mclk) && | ||
6123 | (ci_cpl1->sclk == ci_cpl2->sclk) && | ||
6124 | (ci_cpl1->pcie_gen == ci_cpl2->pcie_gen) && | ||
6125 | (ci_cpl1->pcie_lane == ci_cpl2->pcie_lane)); | ||
6126 | } | ||
6127 | |||
6128 | static int ci_check_state_equal(struct amdgpu_device *adev, | ||
6129 | struct amdgpu_ps *cps, | ||
6130 | struct amdgpu_ps *rps, | ||
6131 | bool *equal) | ||
6132 | { | ||
6133 | struct ci_ps *ci_cps; | ||
6134 | struct ci_ps *ci_rps; | ||
6135 | int i; | ||
6136 | |||
6137 | if (adev == NULL || cps == NULL || rps == NULL || equal == NULL) | ||
6138 | return -EINVAL; | ||
6139 | |||
6140 | ci_cps = ci_get_ps(cps); | ||
6141 | ci_rps = ci_get_ps(rps); | ||
6142 | |||
6143 | if (ci_cps == NULL) { | ||
6144 | *equal = false; | ||
6145 | return 0; | ||
6146 | } | ||
6147 | |||
6148 | if (ci_cps->performance_level_count != ci_rps->performance_level_count) { | ||
6149 | |||
6150 | *equal = false; | ||
6151 | return 0; | ||
6152 | } | ||
6153 | |||
6154 | for (i = 0; i < ci_cps->performance_level_count; i++) { | ||
6155 | if (!ci_are_power_levels_equal(&(ci_cps->performance_levels[i]), | ||
6156 | &(ci_rps->performance_levels[i]))) { | ||
6157 | *equal = false; | ||
6158 | return 0; | ||
6159 | } | ||
6160 | } | ||
6161 | |||
6162 | /* If all performance levels are the same, try to use the UVD clocks to break the tie. */ | ||
6163 | *equal = ((cps->vclk == rps->vclk) && (cps->dclk == rps->dclk)); | ||
6164 | *equal &= ((cps->evclk == rps->evclk) && (cps->ecclk == rps->ecclk)); | ||
6165 | |||
6166 | return 0; | ||
6167 | } | ||
6168 | |||
6097 | static u32 ci_dpm_get_sclk(struct amdgpu_device *adev, bool low) | 6169 | static u32 ci_dpm_get_sclk(struct amdgpu_device *adev, bool low) |
6098 | { | 6170 | { |
6099 | struct ci_power_info *pi = ci_get_pi(adev); | 6171 | struct ci_power_info *pi = ci_get_pi(adev); |
@@ -6287,12 +6359,19 @@ static int ci_dpm_suspend(void *handle) | |||
6287 | 6359 | ||
6288 | if (adev->pm.dpm_enabled) { | 6360 | if (adev->pm.dpm_enabled) { |
6289 | mutex_lock(&adev->pm.mutex); | 6361 | mutex_lock(&adev->pm.mutex); |
6290 | /* disable dpm */ | 6362 | amdgpu_irq_put(adev, &adev->pm.dpm.thermal.irq, |
6291 | ci_dpm_disable(adev); | 6363 | AMDGPU_THERMAL_IRQ_LOW_TO_HIGH); |
6292 | /* reset the power state */ | 6364 | amdgpu_irq_put(adev, &adev->pm.dpm.thermal.irq, |
6293 | adev->pm.dpm.current_ps = adev->pm.dpm.requested_ps = adev->pm.dpm.boot_ps; | 6365 | AMDGPU_THERMAL_IRQ_HIGH_TO_LOW); |
6366 | adev->pm.dpm.last_user_state = adev->pm.dpm.user_state; | ||
6367 | adev->pm.dpm.last_state = adev->pm.dpm.state; | ||
6368 | adev->pm.dpm.user_state = POWER_STATE_TYPE_INTERNAL_BOOT; | ||
6369 | adev->pm.dpm.state = POWER_STATE_TYPE_INTERNAL_BOOT; | ||
6294 | mutex_unlock(&adev->pm.mutex); | 6370 | mutex_unlock(&adev->pm.mutex); |
6371 | amdgpu_pm_compute_clocks(adev); | ||
6372 | |||
6295 | } | 6373 | } |
6374 | |||
6296 | return 0; | 6375 | return 0; |
6297 | } | 6376 | } |
6298 | 6377 | ||
@@ -6310,6 +6389,8 @@ static int ci_dpm_resume(void *handle) | |||
6310 | adev->pm.dpm_enabled = false; | 6389 | adev->pm.dpm_enabled = false; |
6311 | else | 6390 | else |
6312 | adev->pm.dpm_enabled = true; | 6391 | adev->pm.dpm_enabled = true; |
6392 | adev->pm.dpm.user_state = adev->pm.dpm.last_user_state; | ||
6393 | adev->pm.dpm.state = adev->pm.dpm.last_state; | ||
6313 | mutex_unlock(&adev->pm.mutex); | 6394 | mutex_unlock(&adev->pm.mutex); |
6314 | if (adev->pm.dpm_enabled) | 6395 | if (adev->pm.dpm_enabled) |
6315 | amdgpu_pm_compute_clocks(adev); | 6396 | amdgpu_pm_compute_clocks(adev); |
@@ -6644,6 +6725,8 @@ static const struct amdgpu_dpm_funcs ci_dpm_funcs = { | |||
6644 | .set_sclk_od = ci_dpm_set_sclk_od, | 6725 | .set_sclk_od = ci_dpm_set_sclk_od, |
6645 | .get_mclk_od = ci_dpm_get_mclk_od, | 6726 | .get_mclk_od = ci_dpm_get_mclk_od, |
6646 | .set_mclk_od = ci_dpm_set_mclk_od, | 6727 | .set_mclk_od = ci_dpm_set_mclk_od, |
6728 | .check_state_equal = ci_check_state_equal, | ||
6729 | .get_vce_clock_state = amdgpu_get_vce_clock_state, | ||
6647 | }; | 6730 | }; |
6648 | 6731 | ||
6649 | static void ci_dpm_set_dpm_funcs(struct amdgpu_device *adev) | 6732 | static void ci_dpm_set_dpm_funcs(struct amdgpu_device *adev) |
@@ -6662,3 +6745,12 @@ static void ci_dpm_set_irq_funcs(struct amdgpu_device *adev) | |||
6662 | adev->pm.dpm.thermal.irq.num_types = AMDGPU_THERMAL_IRQ_LAST; | 6745 | adev->pm.dpm.thermal.irq.num_types = AMDGPU_THERMAL_IRQ_LAST; |
6663 | adev->pm.dpm.thermal.irq.funcs = &ci_dpm_irq_funcs; | 6746 | adev->pm.dpm.thermal.irq.funcs = &ci_dpm_irq_funcs; |
6664 | } | 6747 | } |
6748 | |||
6749 | const struct amdgpu_ip_block_version ci_dpm_ip_block = | ||
6750 | { | ||
6751 | .type = AMD_IP_BLOCK_TYPE_SMC, | ||
6752 | .major = 7, | ||
6753 | .minor = 0, | ||
6754 | .rev = 0, | ||
6755 | .funcs = &ci_dpm_ip_funcs, | ||
6756 | }; | ||
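Two of the ci_dpm.c changes above are about ordering: ci_update_uvd_dpm() now ungates the UVD clocks before enabling UVD DPM, and both the UVD and VCE paths disable DPM before gating the clocks, so DPM is never toggled while the engine clocks are gated. A minimal sketch of that symmetric ordering; the stub functions stand in for amdgpu_set_clockgating_state() and ci_enable_*_dpm() and are not the driver API.

#include <stdio.h>

/* stand-in for amdgpu_set_clockgating_state() */
static int set_clockgating(int gate)
{
	printf("clocks %s\n", gate ? "gated" : "ungated");
	return 0;
}

/* stand-in for ci_enable_uvd_dpm()/ci_enable_vce_dpm() */
static int enable_dpm(int enable)
{
	printf("dpm %s\n", enable ? "enabled" : "disabled");
	return 0;
}

/* mirrors the ordering ci_update_uvd_dpm() now enforces */
static int update_dpm(int gate)
{
	int ret;

	if (!gate) {
		ret = set_clockgating(0);	/* ungate clocks first */
		if (ret)
			return ret;
		ret = enable_dpm(1);		/* then start DPM */
	} else {
		ret = enable_dpm(0);		/* stop DPM first */
		if (ret)
			return ret;
		ret = set_clockgating(1);	/* then gate clocks */
	}
	return ret;
}

int main(void)
{
	update_dpm(0);	/* ungate path */
	update_dpm(1);	/* gate path */
	return 0;
}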
diff --git a/drivers/gpu/drm/amd/amdgpu/cik.c b/drivers/gpu/drm/amd/amdgpu/cik.c index a845b6a93b79..302df85893ab 100644 --- a/drivers/gpu/drm/amd/amdgpu/cik.c +++ b/drivers/gpu/drm/amd/amdgpu/cik.c | |||
@@ -1189,18 +1189,6 @@ static int cik_gpu_pci_config_reset(struct amdgpu_device *adev) | |||
1189 | return r; | 1189 | return r; |
1190 | } | 1190 | } |
1191 | 1191 | ||
1192 | static void cik_set_bios_scratch_engine_hung(struct amdgpu_device *adev, bool hung) | ||
1193 | { | ||
1194 | u32 tmp = RREG32(mmBIOS_SCRATCH_3); | ||
1195 | |||
1196 | if (hung) | ||
1197 | tmp |= ATOM_S3_ASIC_GUI_ENGINE_HUNG; | ||
1198 | else | ||
1199 | tmp &= ~ATOM_S3_ASIC_GUI_ENGINE_HUNG; | ||
1200 | |||
1201 | WREG32(mmBIOS_SCRATCH_3, tmp); | ||
1202 | } | ||
1203 | |||
1204 | /** | 1192 | /** |
1205 | * cik_asic_reset - soft reset GPU | 1193 | * cik_asic_reset - soft reset GPU |
1206 | * | 1194 | * |
@@ -1213,11 +1201,12 @@ static void cik_set_bios_scratch_engine_hung(struct amdgpu_device *adev, bool hu | |||
1213 | static int cik_asic_reset(struct amdgpu_device *adev) | 1201 | static int cik_asic_reset(struct amdgpu_device *adev) |
1214 | { | 1202 | { |
1215 | int r; | 1203 | int r; |
1216 | cik_set_bios_scratch_engine_hung(adev, true); | 1204 | |
1205 | amdgpu_atombios_scratch_regs_engine_hung(adev, true); | ||
1217 | 1206 | ||
1218 | r = cik_gpu_pci_config_reset(adev); | 1207 | r = cik_gpu_pci_config_reset(adev); |
1219 | 1208 | ||
1220 | cik_set_bios_scratch_engine_hung(adev, false); | 1209 | amdgpu_atombios_scratch_regs_engine_hung(adev, false); |
1221 | 1210 | ||
1222 | return r; | 1211 | return r; |
1223 | } | 1212 | } |
@@ -1641,745 +1630,6 @@ static void cik_detect_hw_virtualization(struct amdgpu_device *adev) | |||
1641 | adev->virtualization.virtual_caps |= AMDGPU_PASSTHROUGH_MODE; | 1630 | adev->virtualization.virtual_caps |= AMDGPU_PASSTHROUGH_MODE; |
1642 | } | 1631 | } |
1643 | 1632 | ||
1644 | static const struct amdgpu_ip_block_version bonaire_ip_blocks[] = | ||
1645 | { | ||
1646 | /* ORDER MATTERS! */ | ||
1647 | { | ||
1648 | .type = AMD_IP_BLOCK_TYPE_COMMON, | ||
1649 | .major = 1, | ||
1650 | .minor = 0, | ||
1651 | .rev = 0, | ||
1652 | .funcs = &cik_common_ip_funcs, | ||
1653 | }, | ||
1654 | { | ||
1655 | .type = AMD_IP_BLOCK_TYPE_GMC, | ||
1656 | .major = 7, | ||
1657 | .minor = 0, | ||
1658 | .rev = 0, | ||
1659 | .funcs = &gmc_v7_0_ip_funcs, | ||
1660 | }, | ||
1661 | { | ||
1662 | .type = AMD_IP_BLOCK_TYPE_IH, | ||
1663 | .major = 2, | ||
1664 | .minor = 0, | ||
1665 | .rev = 0, | ||
1666 | .funcs = &cik_ih_ip_funcs, | ||
1667 | }, | ||
1668 | { | ||
1669 | .type = AMD_IP_BLOCK_TYPE_SMC, | ||
1670 | .major = 7, | ||
1671 | .minor = 0, | ||
1672 | .rev = 0, | ||
1673 | .funcs = &amdgpu_pp_ip_funcs, | ||
1674 | }, | ||
1675 | { | ||
1676 | .type = AMD_IP_BLOCK_TYPE_DCE, | ||
1677 | .major = 8, | ||
1678 | .minor = 2, | ||
1679 | .rev = 0, | ||
1680 | .funcs = &dce_v8_0_ip_funcs, | ||
1681 | }, | ||
1682 | { | ||
1683 | .type = AMD_IP_BLOCK_TYPE_GFX, | ||
1684 | .major = 7, | ||
1685 | .minor = 2, | ||
1686 | .rev = 0, | ||
1687 | .funcs = &gfx_v7_0_ip_funcs, | ||
1688 | }, | ||
1689 | { | ||
1690 | .type = AMD_IP_BLOCK_TYPE_SDMA, | ||
1691 | .major = 2, | ||
1692 | .minor = 0, | ||
1693 | .rev = 0, | ||
1694 | .funcs = &cik_sdma_ip_funcs, | ||
1695 | }, | ||
1696 | { | ||
1697 | .type = AMD_IP_BLOCK_TYPE_UVD, | ||
1698 | .major = 4, | ||
1699 | .minor = 2, | ||
1700 | .rev = 0, | ||
1701 | .funcs = &uvd_v4_2_ip_funcs, | ||
1702 | }, | ||
1703 | { | ||
1704 | .type = AMD_IP_BLOCK_TYPE_VCE, | ||
1705 | .major = 2, | ||
1706 | .minor = 0, | ||
1707 | .rev = 0, | ||
1708 | .funcs = &vce_v2_0_ip_funcs, | ||
1709 | }, | ||
1710 | }; | ||
1711 | |||
1712 | static const struct amdgpu_ip_block_version bonaire_ip_blocks_vd[] = | ||
1713 | { | ||
1714 | /* ORDER MATTERS! */ | ||
1715 | { | ||
1716 | .type = AMD_IP_BLOCK_TYPE_COMMON, | ||
1717 | .major = 1, | ||
1718 | .minor = 0, | ||
1719 | .rev = 0, | ||
1720 | .funcs = &cik_common_ip_funcs, | ||
1721 | }, | ||
1722 | { | ||
1723 | .type = AMD_IP_BLOCK_TYPE_GMC, | ||
1724 | .major = 7, | ||
1725 | .minor = 0, | ||
1726 | .rev = 0, | ||
1727 | .funcs = &gmc_v7_0_ip_funcs, | ||
1728 | }, | ||
1729 | { | ||
1730 | .type = AMD_IP_BLOCK_TYPE_IH, | ||
1731 | .major = 2, | ||
1732 | .minor = 0, | ||
1733 | .rev = 0, | ||
1734 | .funcs = &cik_ih_ip_funcs, | ||
1735 | }, | ||
1736 | { | ||
1737 | .type = AMD_IP_BLOCK_TYPE_SMC, | ||
1738 | .major = 7, | ||
1739 | .minor = 0, | ||
1740 | .rev = 0, | ||
1741 | .funcs = &amdgpu_pp_ip_funcs, | ||
1742 | }, | ||
1743 | { | ||
1744 | .type = AMD_IP_BLOCK_TYPE_DCE, | ||
1745 | .major = 8, | ||
1746 | .minor = 2, | ||
1747 | .rev = 0, | ||
1748 | .funcs = &dce_virtual_ip_funcs, | ||
1749 | }, | ||
1750 | { | ||
1751 | .type = AMD_IP_BLOCK_TYPE_GFX, | ||
1752 | .major = 7, | ||
1753 | .minor = 2, | ||
1754 | .rev = 0, | ||
1755 | .funcs = &gfx_v7_0_ip_funcs, | ||
1756 | }, | ||
1757 | { | ||
1758 | .type = AMD_IP_BLOCK_TYPE_SDMA, | ||
1759 | .major = 2, | ||
1760 | .minor = 0, | ||
1761 | .rev = 0, | ||
1762 | .funcs = &cik_sdma_ip_funcs, | ||
1763 | }, | ||
1764 | { | ||
1765 | .type = AMD_IP_BLOCK_TYPE_UVD, | ||
1766 | .major = 4, | ||
1767 | .minor = 2, | ||
1768 | .rev = 0, | ||
1769 | .funcs = &uvd_v4_2_ip_funcs, | ||
1770 | }, | ||
1771 | { | ||
1772 | .type = AMD_IP_BLOCK_TYPE_VCE, | ||
1773 | .major = 2, | ||
1774 | .minor = 0, | ||
1775 | .rev = 0, | ||
1776 | .funcs = &vce_v2_0_ip_funcs, | ||
1777 | }, | ||
1778 | }; | ||
1779 | |||
1780 | static const struct amdgpu_ip_block_version hawaii_ip_blocks[] = | ||
1781 | { | ||
1782 | /* ORDER MATTERS! */ | ||
1783 | { | ||
1784 | .type = AMD_IP_BLOCK_TYPE_COMMON, | ||
1785 | .major = 1, | ||
1786 | .minor = 0, | ||
1787 | .rev = 0, | ||
1788 | .funcs = &cik_common_ip_funcs, | ||
1789 | }, | ||
1790 | { | ||
1791 | .type = AMD_IP_BLOCK_TYPE_GMC, | ||
1792 | .major = 7, | ||
1793 | .minor = 0, | ||
1794 | .rev = 0, | ||
1795 | .funcs = &gmc_v7_0_ip_funcs, | ||
1796 | }, | ||
1797 | { | ||
1798 | .type = AMD_IP_BLOCK_TYPE_IH, | ||
1799 | .major = 2, | ||
1800 | .minor = 0, | ||
1801 | .rev = 0, | ||
1802 | .funcs = &cik_ih_ip_funcs, | ||
1803 | }, | ||
1804 | { | ||
1805 | .type = AMD_IP_BLOCK_TYPE_SMC, | ||
1806 | .major = 7, | ||
1807 | .minor = 0, | ||
1808 | .rev = 0, | ||
1809 | .funcs = &amdgpu_pp_ip_funcs, | ||
1810 | }, | ||
1811 | { | ||
1812 | .type = AMD_IP_BLOCK_TYPE_DCE, | ||
1813 | .major = 8, | ||
1814 | .minor = 5, | ||
1815 | .rev = 0, | ||
1816 | .funcs = &dce_v8_0_ip_funcs, | ||
1817 | }, | ||
1818 | { | ||
1819 | .type = AMD_IP_BLOCK_TYPE_GFX, | ||
1820 | .major = 7, | ||
1821 | .minor = 3, | ||
1822 | .rev = 0, | ||
1823 | .funcs = &gfx_v7_0_ip_funcs, | ||
1824 | }, | ||
1825 | { | ||
1826 | .type = AMD_IP_BLOCK_TYPE_SDMA, | ||
1827 | .major = 2, | ||
1828 | .minor = 0, | ||
1829 | .rev = 0, | ||
1830 | .funcs = &cik_sdma_ip_funcs, | ||
1831 | }, | ||
1832 | { | ||
1833 | .type = AMD_IP_BLOCK_TYPE_UVD, | ||
1834 | .major = 4, | ||
1835 | .minor = 2, | ||
1836 | .rev = 0, | ||
1837 | .funcs = &uvd_v4_2_ip_funcs, | ||
1838 | }, | ||
1839 | { | ||
1840 | .type = AMD_IP_BLOCK_TYPE_VCE, | ||
1841 | .major = 2, | ||
1842 | .minor = 0, | ||
1843 | .rev = 0, | ||
1844 | .funcs = &vce_v2_0_ip_funcs, | ||
1845 | }, | ||
1846 | }; | ||
1847 | |||
1848 | static const struct amdgpu_ip_block_version hawaii_ip_blocks_vd[] = | ||
1849 | { | ||
1850 | /* ORDER MATTERS! */ | ||
1851 | { | ||
1852 | .type = AMD_IP_BLOCK_TYPE_COMMON, | ||
1853 | .major = 1, | ||
1854 | .minor = 0, | ||
1855 | .rev = 0, | ||
1856 | .funcs = &cik_common_ip_funcs, | ||
1857 | }, | ||
1858 | { | ||
1859 | .type = AMD_IP_BLOCK_TYPE_GMC, | ||
1860 | .major = 7, | ||
1861 | .minor = 0, | ||
1862 | .rev = 0, | ||
1863 | .funcs = &gmc_v7_0_ip_funcs, | ||
1864 | }, | ||
1865 | { | ||
1866 | .type = AMD_IP_BLOCK_TYPE_IH, | ||
1867 | .major = 2, | ||
1868 | .minor = 0, | ||
1869 | .rev = 0, | ||
1870 | .funcs = &cik_ih_ip_funcs, | ||
1871 | }, | ||
1872 | { | ||
1873 | .type = AMD_IP_BLOCK_TYPE_SMC, | ||
1874 | .major = 7, | ||
1875 | .minor = 0, | ||
1876 | .rev = 0, | ||
1877 | .funcs = &amdgpu_pp_ip_funcs, | ||
1878 | }, | ||
1879 | { | ||
1880 | .type = AMD_IP_BLOCK_TYPE_DCE, | ||
1881 | .major = 8, | ||
1882 | .minor = 5, | ||
1883 | .rev = 0, | ||
1884 | .funcs = &dce_virtual_ip_funcs, | ||
1885 | }, | ||
1886 | { | ||
1887 | .type = AMD_IP_BLOCK_TYPE_GFX, | ||
1888 | .major = 7, | ||
1889 | .minor = 3, | ||
1890 | .rev = 0, | ||
1891 | .funcs = &gfx_v7_0_ip_funcs, | ||
1892 | }, | ||
1893 | { | ||
1894 | .type = AMD_IP_BLOCK_TYPE_SDMA, | ||
1895 | .major = 2, | ||
1896 | .minor = 0, | ||
1897 | .rev = 0, | ||
1898 | .funcs = &cik_sdma_ip_funcs, | ||
1899 | }, | ||
1900 | { | ||
1901 | .type = AMD_IP_BLOCK_TYPE_UVD, | ||
1902 | .major = 4, | ||
1903 | .minor = 2, | ||
1904 | .rev = 0, | ||
1905 | .funcs = &uvd_v4_2_ip_funcs, | ||
1906 | }, | ||
1907 | { | ||
1908 | .type = AMD_IP_BLOCK_TYPE_VCE, | ||
1909 | .major = 2, | ||
1910 | .minor = 0, | ||
1911 | .rev = 0, | ||
1912 | .funcs = &vce_v2_0_ip_funcs, | ||
1913 | }, | ||
1914 | }; | ||
1915 | |||
1916 | static const struct amdgpu_ip_block_version kabini_ip_blocks[] = | ||
1917 | { | ||
1918 | /* ORDER MATTERS! */ | ||
1919 | { | ||
1920 | .type = AMD_IP_BLOCK_TYPE_COMMON, | ||
1921 | .major = 1, | ||
1922 | .minor = 0, | ||
1923 | .rev = 0, | ||
1924 | .funcs = &cik_common_ip_funcs, | ||
1925 | }, | ||
1926 | { | ||
1927 | .type = AMD_IP_BLOCK_TYPE_GMC, | ||
1928 | .major = 7, | ||
1929 | .minor = 0, | ||
1930 | .rev = 0, | ||
1931 | .funcs = &gmc_v7_0_ip_funcs, | ||
1932 | }, | ||
1933 | { | ||
1934 | .type = AMD_IP_BLOCK_TYPE_IH, | ||
1935 | .major = 2, | ||
1936 | .minor = 0, | ||
1937 | .rev = 0, | ||
1938 | .funcs = &cik_ih_ip_funcs, | ||
1939 | }, | ||
1940 | { | ||
1941 | .type = AMD_IP_BLOCK_TYPE_SMC, | ||
1942 | .major = 7, | ||
1943 | .minor = 0, | ||
1944 | .rev = 0, | ||
1945 | .funcs = &amdgpu_pp_ip_funcs, | ||
1946 | }, | ||
1947 | { | ||
1948 | .type = AMD_IP_BLOCK_TYPE_DCE, | ||
1949 | .major = 8, | ||
1950 | .minor = 3, | ||
1951 | .rev = 0, | ||
1952 | .funcs = &dce_v8_0_ip_funcs, | ||
1953 | }, | ||
1954 | { | ||
1955 | .type = AMD_IP_BLOCK_TYPE_GFX, | ||
1956 | .major = 7, | ||
1957 | .minor = 2, | ||
1958 | .rev = 0, | ||
1959 | .funcs = &gfx_v7_0_ip_funcs, | ||
1960 | }, | ||
1961 | { | ||
1962 | .type = AMD_IP_BLOCK_TYPE_SDMA, | ||
1963 | .major = 2, | ||
1964 | .minor = 0, | ||
1965 | .rev = 0, | ||
1966 | .funcs = &cik_sdma_ip_funcs, | ||
1967 | }, | ||
1968 | { | ||
1969 | .type = AMD_IP_BLOCK_TYPE_UVD, | ||
1970 | .major = 4, | ||
1971 | .minor = 2, | ||
1972 | .rev = 0, | ||
1973 | .funcs = &uvd_v4_2_ip_funcs, | ||
1974 | }, | ||
1975 | { | ||
1976 | .type = AMD_IP_BLOCK_TYPE_VCE, | ||
1977 | .major = 2, | ||
1978 | .minor = 0, | ||
1979 | .rev = 0, | ||
1980 | .funcs = &vce_v2_0_ip_funcs, | ||
1981 | }, | ||
1982 | }; | ||
1983 | |||
1984 | static const struct amdgpu_ip_block_version kabini_ip_blocks_vd[] = | ||
1985 | { | ||
1986 | /* ORDER MATTERS! */ | ||
1987 | { | ||
1988 | .type = AMD_IP_BLOCK_TYPE_COMMON, | ||
1989 | .major = 1, | ||
1990 | .minor = 0, | ||
1991 | .rev = 0, | ||
1992 | .funcs = &cik_common_ip_funcs, | ||
1993 | }, | ||
1994 | { | ||
1995 | .type = AMD_IP_BLOCK_TYPE_GMC, | ||
1996 | .major = 7, | ||
1997 | .minor = 0, | ||
1998 | .rev = 0, | ||
1999 | .funcs = &gmc_v7_0_ip_funcs, | ||
2000 | }, | ||
2001 | { | ||
2002 | .type = AMD_IP_BLOCK_TYPE_IH, | ||
2003 | .major = 2, | ||
2004 | .minor = 0, | ||
2005 | .rev = 0, | ||
2006 | .funcs = &cik_ih_ip_funcs, | ||
2007 | }, | ||
2008 | { | ||
2009 | .type = AMD_IP_BLOCK_TYPE_SMC, | ||
2010 | .major = 7, | ||
2011 | .minor = 0, | ||
2012 | .rev = 0, | ||
2013 | .funcs = &amdgpu_pp_ip_funcs, | ||
2014 | }, | ||
2015 | { | ||
2016 | .type = AMD_IP_BLOCK_TYPE_DCE, | ||
2017 | .major = 8, | ||
2018 | .minor = 3, | ||
2019 | .rev = 0, | ||
2020 | .funcs = &dce_virtual_ip_funcs, | ||
2021 | }, | ||
2022 | { | ||
2023 | .type = AMD_IP_BLOCK_TYPE_GFX, | ||
2024 | .major = 7, | ||
2025 | .minor = 2, | ||
2026 | .rev = 0, | ||
2027 | .funcs = &gfx_v7_0_ip_funcs, | ||
2028 | }, | ||
2029 | { | ||
2030 | .type = AMD_IP_BLOCK_TYPE_SDMA, | ||
2031 | .major = 2, | ||
2032 | .minor = 0, | ||
2033 | .rev = 0, | ||
2034 | .funcs = &cik_sdma_ip_funcs, | ||
2035 | }, | ||
2036 | { | ||
2037 | .type = AMD_IP_BLOCK_TYPE_UVD, | ||
2038 | .major = 4, | ||
2039 | .minor = 2, | ||
2040 | .rev = 0, | ||
2041 | .funcs = &uvd_v4_2_ip_funcs, | ||
2042 | }, | ||
2043 | { | ||
2044 | .type = AMD_IP_BLOCK_TYPE_VCE, | ||
2045 | .major = 2, | ||
2046 | .minor = 0, | ||
2047 | .rev = 0, | ||
2048 | .funcs = &vce_v2_0_ip_funcs, | ||
2049 | }, | ||
2050 | }; | ||
2051 | |||
2052 | static const struct amdgpu_ip_block_version mullins_ip_blocks[] = | ||
2053 | { | ||
2054 | /* ORDER MATTERS! */ | ||
2055 | { | ||
2056 | .type = AMD_IP_BLOCK_TYPE_COMMON, | ||
2057 | .major = 1, | ||
2058 | .minor = 0, | ||
2059 | .rev = 0, | ||
2060 | .funcs = &cik_common_ip_funcs, | ||
2061 | }, | ||
2062 | { | ||
2063 | .type = AMD_IP_BLOCK_TYPE_GMC, | ||
2064 | .major = 7, | ||
2065 | .minor = 0, | ||
2066 | .rev = 0, | ||
2067 | .funcs = &gmc_v7_0_ip_funcs, | ||
2068 | }, | ||
2069 | { | ||
2070 | .type = AMD_IP_BLOCK_TYPE_IH, | ||
2071 | .major = 2, | ||
2072 | .minor = 0, | ||
2073 | .rev = 0, | ||
2074 | .funcs = &cik_ih_ip_funcs, | ||
2075 | }, | ||
2076 | { | ||
2077 | .type = AMD_IP_BLOCK_TYPE_SMC, | ||
2078 | .major = 7, | ||
2079 | .minor = 0, | ||
2080 | .rev = 0, | ||
2081 | .funcs = &amdgpu_pp_ip_funcs, | ||
2082 | }, | ||
2083 | { | ||
2084 | .type = AMD_IP_BLOCK_TYPE_DCE, | ||
2085 | .major = 8, | ||
2086 | .minor = 3, | ||
2087 | .rev = 0, | ||
2088 | .funcs = &dce_v8_0_ip_funcs, | ||
2089 | }, | ||
2090 | { | ||
2091 | .type = AMD_IP_BLOCK_TYPE_GFX, | ||
2092 | .major = 7, | ||
2093 | .minor = 2, | ||
2094 | .rev = 0, | ||
2095 | .funcs = &gfx_v7_0_ip_funcs, | ||
2096 | }, | ||
2097 | { | ||
2098 | .type = AMD_IP_BLOCK_TYPE_SDMA, | ||
2099 | .major = 2, | ||
2100 | .minor = 0, | ||
2101 | .rev = 0, | ||
2102 | .funcs = &cik_sdma_ip_funcs, | ||
2103 | }, | ||
2104 | { | ||
2105 | .type = AMD_IP_BLOCK_TYPE_UVD, | ||
2106 | .major = 4, | ||
2107 | .minor = 2, | ||
2108 | .rev = 0, | ||
2109 | .funcs = &uvd_v4_2_ip_funcs, | ||
2110 | }, | ||
2111 | { | ||
2112 | .type = AMD_IP_BLOCK_TYPE_VCE, | ||
2113 | .major = 2, | ||
2114 | .minor = 0, | ||
2115 | .rev = 0, | ||
2116 | .funcs = &vce_v2_0_ip_funcs, | ||
2117 | }, | ||
2118 | }; | ||
2119 | |||
2120 | static const struct amdgpu_ip_block_version mullins_ip_blocks_vd[] = | ||
2121 | { | ||
2122 | /* ORDER MATTERS! */ | ||
2123 | { | ||
2124 | .type = AMD_IP_BLOCK_TYPE_COMMON, | ||
2125 | .major = 1, | ||
2126 | .minor = 0, | ||
2127 | .rev = 0, | ||
2128 | .funcs = &cik_common_ip_funcs, | ||
2129 | }, | ||
2130 | { | ||
2131 | .type = AMD_IP_BLOCK_TYPE_GMC, | ||
2132 | .major = 7, | ||
2133 | .minor = 0, | ||
2134 | .rev = 0, | ||
2135 | .funcs = &gmc_v7_0_ip_funcs, | ||
2136 | }, | ||
2137 | { | ||
2138 | .type = AMD_IP_BLOCK_TYPE_IH, | ||
2139 | .major = 2, | ||
2140 | .minor = 0, | ||
2141 | .rev = 0, | ||
2142 | .funcs = &cik_ih_ip_funcs, | ||
2143 | }, | ||
2144 | { | ||
2145 | .type = AMD_IP_BLOCK_TYPE_SMC, | ||
2146 | .major = 7, | ||
2147 | .minor = 0, | ||
2148 | .rev = 0, | ||
2149 | .funcs = &amdgpu_pp_ip_funcs, | ||
2150 | }, | ||
2151 | { | ||
2152 | .type = AMD_IP_BLOCK_TYPE_DCE, | ||
2153 | .major = 8, | ||
2154 | .minor = 3, | ||
2155 | .rev = 0, | ||
2156 | .funcs = &dce_virtual_ip_funcs, | ||
2157 | }, | ||
2158 | { | ||
2159 | .type = AMD_IP_BLOCK_TYPE_GFX, | ||
2160 | .major = 7, | ||
2161 | .minor = 2, | ||
2162 | .rev = 0, | ||
2163 | .funcs = &gfx_v7_0_ip_funcs, | ||
2164 | }, | ||
2165 | { | ||
2166 | .type = AMD_IP_BLOCK_TYPE_SDMA, | ||
2167 | .major = 2, | ||
2168 | .minor = 0, | ||
2169 | .rev = 0, | ||
2170 | .funcs = &cik_sdma_ip_funcs, | ||
2171 | }, | ||
2172 | { | ||
2173 | .type = AMD_IP_BLOCK_TYPE_UVD, | ||
2174 | .major = 4, | ||
2175 | .minor = 2, | ||
2176 | .rev = 0, | ||
2177 | .funcs = &uvd_v4_2_ip_funcs, | ||
2178 | }, | ||
2179 | { | ||
2180 | .type = AMD_IP_BLOCK_TYPE_VCE, | ||
2181 | .major = 2, | ||
2182 | .minor = 0, | ||
2183 | .rev = 0, | ||
2184 | .funcs = &vce_v2_0_ip_funcs, | ||
2185 | }, | ||
2186 | }; | ||
2187 | |||
2188 | static const struct amdgpu_ip_block_version kaveri_ip_blocks[] = | ||
2189 | { | ||
2190 | /* ORDER MATTERS! */ | ||
2191 | { | ||
2192 | .type = AMD_IP_BLOCK_TYPE_COMMON, | ||
2193 | .major = 1, | ||
2194 | .minor = 0, | ||
2195 | .rev = 0, | ||
2196 | .funcs = &cik_common_ip_funcs, | ||
2197 | }, | ||
2198 | { | ||
2199 | .type = AMD_IP_BLOCK_TYPE_GMC, | ||
2200 | .major = 7, | ||
2201 | .minor = 0, | ||
2202 | .rev = 0, | ||
2203 | .funcs = &gmc_v7_0_ip_funcs, | ||
2204 | }, | ||
2205 | { | ||
2206 | .type = AMD_IP_BLOCK_TYPE_IH, | ||
2207 | .major = 2, | ||
2208 | .minor = 0, | ||
2209 | .rev = 0, | ||
2210 | .funcs = &cik_ih_ip_funcs, | ||
2211 | }, | ||
2212 | { | ||
2213 | .type = AMD_IP_BLOCK_TYPE_SMC, | ||
2214 | .major = 7, | ||
2215 | .minor = 0, | ||
2216 | .rev = 0, | ||
2217 | .funcs = &amdgpu_pp_ip_funcs, | ||
2218 | }, | ||
2219 | { | ||
2220 | .type = AMD_IP_BLOCK_TYPE_DCE, | ||
2221 | .major = 8, | ||
2222 | .minor = 1, | ||
2223 | .rev = 0, | ||
2224 | .funcs = &dce_v8_0_ip_funcs, | ||
2225 | }, | ||
2226 | { | ||
2227 | .type = AMD_IP_BLOCK_TYPE_GFX, | ||
2228 | .major = 7, | ||
2229 | .minor = 1, | ||
2230 | .rev = 0, | ||
2231 | .funcs = &gfx_v7_0_ip_funcs, | ||
2232 | }, | ||
2233 | { | ||
2234 | .type = AMD_IP_BLOCK_TYPE_SDMA, | ||
2235 | .major = 2, | ||
2236 | .minor = 0, | ||
2237 | .rev = 0, | ||
2238 | .funcs = &cik_sdma_ip_funcs, | ||
2239 | }, | ||
2240 | { | ||
2241 | .type = AMD_IP_BLOCK_TYPE_UVD, | ||
2242 | .major = 4, | ||
2243 | .minor = 2, | ||
2244 | .rev = 0, | ||
2245 | .funcs = &uvd_v4_2_ip_funcs, | ||
2246 | }, | ||
2247 | { | ||
2248 | .type = AMD_IP_BLOCK_TYPE_VCE, | ||
2249 | .major = 2, | ||
2250 | .minor = 0, | ||
2251 | .rev = 0, | ||
2252 | .funcs = &vce_v2_0_ip_funcs, | ||
2253 | }, | ||
2254 | }; | ||
2255 | |||
2256 | static const struct amdgpu_ip_block_version kaveri_ip_blocks_vd[] = | ||
2257 | { | ||
2258 | /* ORDER MATTERS! */ | ||
2259 | { | ||
2260 | .type = AMD_IP_BLOCK_TYPE_COMMON, | ||
2261 | .major = 1, | ||
2262 | .minor = 0, | ||
2263 | .rev = 0, | ||
2264 | .funcs = &cik_common_ip_funcs, | ||
2265 | }, | ||
2266 | { | ||
2267 | .type = AMD_IP_BLOCK_TYPE_GMC, | ||
2268 | .major = 7, | ||
2269 | .minor = 0, | ||
2270 | .rev = 0, | ||
2271 | .funcs = &gmc_v7_0_ip_funcs, | ||
2272 | }, | ||
2273 | { | ||
2274 | .type = AMD_IP_BLOCK_TYPE_IH, | ||
2275 | .major = 2, | ||
2276 | .minor = 0, | ||
2277 | .rev = 0, | ||
2278 | .funcs = &cik_ih_ip_funcs, | ||
2279 | }, | ||
2280 | { | ||
2281 | .type = AMD_IP_BLOCK_TYPE_SMC, | ||
2282 | .major = 7, | ||
2283 | .minor = 0, | ||
2284 | .rev = 0, | ||
2285 | .funcs = &amdgpu_pp_ip_funcs, | ||
2286 | }, | ||
2287 | { | ||
2288 | .type = AMD_IP_BLOCK_TYPE_DCE, | ||
2289 | .major = 8, | ||
2290 | .minor = 1, | ||
2291 | .rev = 0, | ||
2292 | .funcs = &dce_virtual_ip_funcs, | ||
2293 | }, | ||
2294 | { | ||
2295 | .type = AMD_IP_BLOCK_TYPE_GFX, | ||
2296 | .major = 7, | ||
2297 | .minor = 1, | ||
2298 | .rev = 0, | ||
2299 | .funcs = &gfx_v7_0_ip_funcs, | ||
2300 | }, | ||
2301 | { | ||
2302 | .type = AMD_IP_BLOCK_TYPE_SDMA, | ||
2303 | .major = 2, | ||
2304 | .minor = 0, | ||
2305 | .rev = 0, | ||
2306 | .funcs = &cik_sdma_ip_funcs, | ||
2307 | }, | ||
2308 | { | ||
2309 | .type = AMD_IP_BLOCK_TYPE_UVD, | ||
2310 | .major = 4, | ||
2311 | .minor = 2, | ||
2312 | .rev = 0, | ||
2313 | .funcs = &uvd_v4_2_ip_funcs, | ||
2314 | }, | ||
2315 | { | ||
2316 | .type = AMD_IP_BLOCK_TYPE_VCE, | ||
2317 | .major = 2, | ||
2318 | .minor = 0, | ||
2319 | .rev = 0, | ||
2320 | .funcs = &vce_v2_0_ip_funcs, | ||
2321 | }, | ||
2322 | }; | ||
2323 | |||
2324 | int cik_set_ip_blocks(struct amdgpu_device *adev) | ||
2325 | { | ||
2326 | if (adev->enable_virtual_display) { | ||
2327 | switch (adev->asic_type) { | ||
2328 | case CHIP_BONAIRE: | ||
2329 | adev->ip_blocks = bonaire_ip_blocks_vd; | ||
2330 | adev->num_ip_blocks = ARRAY_SIZE(bonaire_ip_blocks_vd); | ||
2331 | break; | ||
2332 | case CHIP_HAWAII: | ||
2333 | adev->ip_blocks = hawaii_ip_blocks_vd; | ||
2334 | adev->num_ip_blocks = ARRAY_SIZE(hawaii_ip_blocks_vd); | ||
2335 | break; | ||
2336 | case CHIP_KAVERI: | ||
2337 | adev->ip_blocks = kaveri_ip_blocks_vd; | ||
2338 | adev->num_ip_blocks = ARRAY_SIZE(kaveri_ip_blocks_vd); | ||
2339 | break; | ||
2340 | case CHIP_KABINI: | ||
2341 | adev->ip_blocks = kabini_ip_blocks_vd; | ||
2342 | adev->num_ip_blocks = ARRAY_SIZE(kabini_ip_blocks_vd); | ||
2343 | break; | ||
2344 | case CHIP_MULLINS: | ||
2345 | adev->ip_blocks = mullins_ip_blocks_vd; | ||
2346 | adev->num_ip_blocks = ARRAY_SIZE(mullins_ip_blocks_vd); | ||
2347 | break; | ||
2348 | default: | ||
2349 | /* FIXME: not supported yet */ | ||
2350 | return -EINVAL; | ||
2351 | } | ||
2352 | } else { | ||
2353 | switch (adev->asic_type) { | ||
2354 | case CHIP_BONAIRE: | ||
2355 | adev->ip_blocks = bonaire_ip_blocks; | ||
2356 | adev->num_ip_blocks = ARRAY_SIZE(bonaire_ip_blocks); | ||
2357 | break; | ||
2358 | case CHIP_HAWAII: | ||
2359 | adev->ip_blocks = hawaii_ip_blocks; | ||
2360 | adev->num_ip_blocks = ARRAY_SIZE(hawaii_ip_blocks); | ||
2361 | break; | ||
2362 | case CHIP_KAVERI: | ||
2363 | adev->ip_blocks = kaveri_ip_blocks; | ||
2364 | adev->num_ip_blocks = ARRAY_SIZE(kaveri_ip_blocks); | ||
2365 | break; | ||
2366 | case CHIP_KABINI: | ||
2367 | adev->ip_blocks = kabini_ip_blocks; | ||
2368 | adev->num_ip_blocks = ARRAY_SIZE(kabini_ip_blocks); | ||
2369 | break; | ||
2370 | case CHIP_MULLINS: | ||
2371 | adev->ip_blocks = mullins_ip_blocks; | ||
2372 | adev->num_ip_blocks = ARRAY_SIZE(mullins_ip_blocks); | ||
2373 | break; | ||
2374 | default: | ||
2375 | /* FIXME: not supported yet */ | ||
2376 | return -EINVAL; | ||
2377 | } | ||
2378 | } | ||
2379 | |||
2380 | return 0; | ||
2381 | } | ||
2382 | |||
2383 | static const struct amdgpu_asic_funcs cik_asic_funcs = | 1633 | static const struct amdgpu_asic_funcs cik_asic_funcs = |
2384 | { | 1634 | { |
2385 | .read_disabled_bios = &cik_read_disabled_bios, | 1635 | .read_disabled_bios = &cik_read_disabled_bios, |
@@ -2612,7 +1862,7 @@ static int cik_common_set_powergating_state(void *handle, | |||
2612 | return 0; | 1862 | return 0; |
2613 | } | 1863 | } |
2614 | 1864 | ||
2615 | const struct amd_ip_funcs cik_common_ip_funcs = { | 1865 | static const struct amd_ip_funcs cik_common_ip_funcs = { |
2616 | .name = "cik_common", | 1866 | .name = "cik_common", |
2617 | .early_init = cik_common_early_init, | 1867 | .early_init = cik_common_early_init, |
2618 | .late_init = NULL, | 1868 | .late_init = NULL, |
@@ -2628,3 +1878,79 @@ const struct amd_ip_funcs cik_common_ip_funcs = { | |||
2628 | .set_clockgating_state = cik_common_set_clockgating_state, | 1878 | .set_clockgating_state = cik_common_set_clockgating_state, |
2629 | .set_powergating_state = cik_common_set_powergating_state, | 1879 | .set_powergating_state = cik_common_set_powergating_state, |
2630 | }; | 1880 | }; |
1881 | |||
1882 | static const struct amdgpu_ip_block_version cik_common_ip_block = | ||
1883 | { | ||
1884 | .type = AMD_IP_BLOCK_TYPE_COMMON, | ||
1885 | .major = 1, | ||
1886 | .minor = 0, | ||
1887 | .rev = 0, | ||
1888 | .funcs = &cik_common_ip_funcs, | ||
1889 | }; | ||
1890 | |||
1891 | int cik_set_ip_blocks(struct amdgpu_device *adev) | ||
1892 | { | ||
1893 | switch (adev->asic_type) { | ||
1894 | case CHIP_BONAIRE: | ||
1895 | amdgpu_ip_block_add(adev, &cik_common_ip_block); | ||
1896 | amdgpu_ip_block_add(adev, &gmc_v7_0_ip_block); | ||
1897 | amdgpu_ip_block_add(adev, &cik_ih_ip_block); | ||
1898 | amdgpu_ip_block_add(adev, &amdgpu_pp_ip_block); | ||
1899 | if (adev->enable_virtual_display) | ||
1900 | amdgpu_ip_block_add(adev, &dce_virtual_ip_block); | ||
1901 | else | ||
1902 | amdgpu_ip_block_add(adev, &dce_v8_2_ip_block); | ||
1903 | amdgpu_ip_block_add(adev, &gfx_v7_2_ip_block); | ||
1904 | amdgpu_ip_block_add(adev, &cik_sdma_ip_block); | ||
1905 | amdgpu_ip_block_add(adev, &uvd_v4_2_ip_block); | ||
1906 | amdgpu_ip_block_add(adev, &vce_v2_0_ip_block); | ||
1907 | break; | ||
1908 | case CHIP_HAWAII: | ||
1909 | amdgpu_ip_block_add(adev, &cik_common_ip_block); | ||
1910 | amdgpu_ip_block_add(adev, &gmc_v7_0_ip_block); | ||
1911 | amdgpu_ip_block_add(adev, &cik_ih_ip_block); | ||
1912 | amdgpu_ip_block_add(adev, &amdgpu_pp_ip_block); | ||
1913 | if (adev->enable_virtual_display) | ||
1914 | amdgpu_ip_block_add(adev, &dce_virtual_ip_block); | ||
1915 | else | ||
1916 | amdgpu_ip_block_add(adev, &dce_v8_5_ip_block); | ||
1917 | amdgpu_ip_block_add(adev, &gfx_v7_3_ip_block); | ||
1918 | amdgpu_ip_block_add(adev, &cik_sdma_ip_block); | ||
1919 | amdgpu_ip_block_add(adev, &uvd_v4_2_ip_block); | ||
1920 | amdgpu_ip_block_add(adev, &vce_v2_0_ip_block); | ||
1921 | break; | ||
1922 | case CHIP_KAVERI: | ||
1923 | amdgpu_ip_block_add(adev, &cik_common_ip_block); | ||
1924 | amdgpu_ip_block_add(adev, &gmc_v7_0_ip_block); | ||
1925 | amdgpu_ip_block_add(adev, &cik_ih_ip_block); | ||
1926 | amdgpu_ip_block_add(adev, &amdgpu_pp_ip_block); | ||
1927 | if (adev->enable_virtual_display) | ||
1928 | amdgpu_ip_block_add(adev, &dce_virtual_ip_block); | ||
1929 | else | ||
1930 | amdgpu_ip_block_add(adev, &dce_v8_1_ip_block); | ||
1931 | amdgpu_ip_block_add(adev, &gfx_v7_1_ip_block); | ||
1932 | amdgpu_ip_block_add(adev, &cik_sdma_ip_block); | ||
1933 | amdgpu_ip_block_add(adev, &uvd_v4_2_ip_block); | ||
1934 | amdgpu_ip_block_add(adev, &vce_v2_0_ip_block); | ||
1935 | break; | ||
1936 | case CHIP_KABINI: | ||
1937 | case CHIP_MULLINS: | ||
1938 | amdgpu_ip_block_add(adev, &cik_common_ip_block); | ||
1939 | amdgpu_ip_block_add(adev, &gmc_v7_0_ip_block); | ||
1940 | amdgpu_ip_block_add(adev, &cik_ih_ip_block); | ||
1941 | amdgpu_ip_block_add(adev, &amdgpu_pp_ip_block); | ||
1942 | if (adev->enable_virtual_display) | ||
1943 | amdgpu_ip_block_add(adev, &dce_virtual_ip_block); | ||
1944 | else | ||
1945 | amdgpu_ip_block_add(adev, &dce_v8_3_ip_block); | ||
1946 | amdgpu_ip_block_add(adev, &gfx_v7_2_ip_block); | ||
1947 | amdgpu_ip_block_add(adev, &cik_sdma_ip_block); | ||
1948 | amdgpu_ip_block_add(adev, &uvd_v4_2_ip_block); | ||
1949 | amdgpu_ip_block_add(adev, &vce_v2_0_ip_block); | ||
1950 | break; | ||
1951 | default: | ||
1952 | /* FIXME: not supported yet */ | ||
1953 | return -EINVAL; | ||
1954 | } | ||
1955 | return 0; | ||
1956 | } | ||
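The cik.c rework above replaces the per-ASIC (and duplicated *_vd virtual-display) arrays with a registration helper: each IP block exports a single amdgpu_ip_block_version, and cik_set_ip_blocks() appends them in initialization order, choosing the DCE or virtual-DCE block at runtime. A simplified standalone sketch of that registration pattern; the types and the add helper are stand-ins for the amdgpu originals.

#include <stdio.h>

#define MAX_IP_BLOCKS 16

struct ip_block_version {
	const char *name;
	int major, minor, rev;
};

struct device {
	const struct ip_block_version *blocks[MAX_IP_BLOCKS];
	int num_blocks;
	int enable_virtual_display;
};

/* stand-in for amdgpu_ip_block_add(): call order defines init order */
static int ip_block_add(struct device *dev,
			const struct ip_block_version *v)
{
	if (dev->num_blocks >= MAX_IP_BLOCKS)
		return -1;
	dev->blocks[dev->num_blocks++] = v;
	return 0;
}

static const struct ip_block_version common_block = { "common", 1, 0, 0 };
static const struct ip_block_version gmc_block = { "gmc", 7, 0, 0 };
static const struct ip_block_version dce_block = { "dce", 8, 2, 0 };
static const struct ip_block_version dce_virtual_block = {
	"dce_virtual", 8, 2, 0
};

int main(void)
{
	struct device dev = { .num_blocks = 0, .enable_virtual_display = 0 };
	int i;

	ip_block_add(&dev, &common_block);
	ip_block_add(&dev, &gmc_block);
	/* one runtime branch replaces the duplicated *_vd arrays */
	ip_block_add(&dev, dev.enable_virtual_display ?
		     &dce_virtual_block : &dce_block);

	for (i = 0; i < dev.num_blocks; i++)
		printf("%s v%d.%d\n", dev.blocks[i]->name,
		       dev.blocks[i]->major, dev.blocks[i]->minor);
	return 0;
}

The payoff is visible in the diffstat: roughly 750 lines of table duplication collapse into one switch statement per ASIC family.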
diff --git a/drivers/gpu/drm/amd/amdgpu/cik.h b/drivers/gpu/drm/amd/amdgpu/cik.h index 5ebd2d7a0327..c4989f51ecef 100644 --- a/drivers/gpu/drm/amd/amdgpu/cik.h +++ b/drivers/gpu/drm/amd/amdgpu/cik.h | |||
@@ -24,8 +24,6 @@ | |||
24 | #ifndef __CIK_H__ | 24 | #ifndef __CIK_H__ |
25 | #define __CIK_H__ | 25 | #define __CIK_H__ |
26 | 26 | ||
27 | extern const struct amd_ip_funcs cik_common_ip_funcs; | ||
28 | |||
29 | void cik_srbm_select(struct amdgpu_device *adev, | 27 | void cik_srbm_select(struct amdgpu_device *adev, |
30 | u32 me, u32 pipe, u32 queue, u32 vmid); | 28 | u32 me, u32 pipe, u32 queue, u32 vmid); |
31 | int cik_set_ip_blocks(struct amdgpu_device *adev); | 29 | int cik_set_ip_blocks(struct amdgpu_device *adev); |
diff --git a/drivers/gpu/drm/amd/amdgpu/cik_ih.c b/drivers/gpu/drm/amd/amdgpu/cik_ih.c index be3d6f79a864..319b32cdea84 100644 --- a/drivers/gpu/drm/amd/amdgpu/cik_ih.c +++ b/drivers/gpu/drm/amd/amdgpu/cik_ih.c | |||
@@ -413,7 +413,7 @@ static int cik_ih_set_powergating_state(void *handle, | |||
413 | return 0; | 413 | return 0; |
414 | } | 414 | } |
415 | 415 | ||
416 | const struct amd_ip_funcs cik_ih_ip_funcs = { | 416 | static const struct amd_ip_funcs cik_ih_ip_funcs = { |
417 | .name = "cik_ih", | 417 | .name = "cik_ih", |
418 | .early_init = cik_ih_early_init, | 418 | .early_init = cik_ih_early_init, |
419 | .late_init = NULL, | 419 | .late_init = NULL, |
@@ -441,3 +441,12 @@ static void cik_ih_set_interrupt_funcs(struct amdgpu_device *adev) | |||
441 | if (adev->irq.ih_funcs == NULL) | 441 | if (adev->irq.ih_funcs == NULL) |
442 | adev->irq.ih_funcs = &cik_ih_funcs; | 442 | adev->irq.ih_funcs = &cik_ih_funcs; |
443 | } | 443 | } |
444 | |||
445 | const struct amdgpu_ip_block_version cik_ih_ip_block = | ||
446 | { | ||
447 | .type = AMD_IP_BLOCK_TYPE_IH, | ||
448 | .major = 2, | ||
449 | .minor = 0, | ||
450 | .rev = 0, | ||
451 | .funcs = &cik_ih_ip_funcs, | ||
452 | }; | ||
diff --git a/drivers/gpu/drm/amd/amdgpu/cik_ih.h b/drivers/gpu/drm/amd/amdgpu/cik_ih.h index 6b0f375ec244..1d9ddee2868e 100644 --- a/drivers/gpu/drm/amd/amdgpu/cik_ih.h +++ b/drivers/gpu/drm/amd/amdgpu/cik_ih.h | |||
@@ -24,6 +24,6 @@ | |||
24 | #ifndef __CIK_IH_H__ | 24 | #ifndef __CIK_IH_H__ |
25 | #define __CIK_IH_H__ | 25 | #define __CIK_IH_H__ |
26 | 26 | ||
27 | extern const struct amd_ip_funcs cik_ih_ip_funcs; | 27 | extern const struct amdgpu_ip_block_version cik_ih_ip_block; |
28 | 28 | ||
29 | #endif | 29 | #endif |
diff --git a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c index cb952acc7133..c7340b6e17c9 100644 --- a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c +++ b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c | |||
@@ -206,10 +206,10 @@ static void cik_sdma_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count) | |||
206 | 206 | ||
207 | for (i = 0; i < count; i++) | 207 | for (i = 0; i < count; i++) |
208 | if (sdma && sdma->burst_nop && (i == 0)) | 208 | if (sdma && sdma->burst_nop && (i == 0)) |
209 | amdgpu_ring_write(ring, ring->nop | | 209 | amdgpu_ring_write(ring, ring->funcs->nop | |
210 | SDMA_NOP_COUNT(count - 1)); | 210 | SDMA_NOP_COUNT(count - 1)); |
211 | else | 211 | else |
212 | amdgpu_ring_write(ring, ring->nop); | 212 | amdgpu_ring_write(ring, ring->funcs->nop); |
213 | } | 213 | } |
214 | 214 | ||
215 | /** | 215 | /** |
@@ -848,22 +848,6 @@ static void cik_sdma_ring_emit_vm_flush(struct amdgpu_ring *ring, | |||
848 | amdgpu_ring_write(ring, (0xfff << 16) | 10); /* retry count, poll interval */ | 848 | amdgpu_ring_write(ring, (0xfff << 16) | 10); /* retry count, poll interval */ |
849 | } | 849 | } |
850 | 850 | ||
851 | static unsigned cik_sdma_ring_get_emit_ib_size(struct amdgpu_ring *ring) | ||
852 | { | ||
853 | return | ||
854 | 7 + 4; /* cik_sdma_ring_emit_ib */ | ||
855 | } | ||
856 | |||
857 | static unsigned cik_sdma_ring_get_dma_frame_size(struct amdgpu_ring *ring) | ||
858 | { | ||
859 | return | ||
860 | 6 + /* cik_sdma_ring_emit_hdp_flush */ | ||
861 | 3 + /* cik_sdma_ring_emit_hdp_invalidate */ | ||
862 | 6 + /* cik_sdma_ring_emit_pipeline_sync */ | ||
863 | 12 + /* cik_sdma_ring_emit_vm_flush */ | ||
864 | 9 + 9 + 9; /* cik_sdma_ring_emit_fence x3 for user fence, vm fence */ | ||
865 | } | ||
866 | |||
867 | static void cik_enable_sdma_mgcg(struct amdgpu_device *adev, | 851 | static void cik_enable_sdma_mgcg(struct amdgpu_device *adev, |
868 | bool enable) | 852 | bool enable) |
869 | { | 853 | { |
@@ -959,11 +943,10 @@ static int cik_sdma_sw_init(void *handle) | |||
959 | ring->ring_obj = NULL; | 943 | ring->ring_obj = NULL; |
960 | sprintf(ring->name, "sdma%d", i); | 944 | sprintf(ring->name, "sdma%d", i); |
961 | r = amdgpu_ring_init(adev, ring, 1024, | 945 | r = amdgpu_ring_init(adev, ring, 1024, |
962 | SDMA_PACKET(SDMA_OPCODE_NOP, 0, 0), 0xf, | ||
963 | &adev->sdma.trap_irq, | 946 | &adev->sdma.trap_irq, |
964 | (i == 0) ? | 947 | (i == 0) ? |
965 | AMDGPU_SDMA_IRQ_TRAP0 : AMDGPU_SDMA_IRQ_TRAP1, | 948 | AMDGPU_SDMA_IRQ_TRAP0 : |
966 | AMDGPU_RING_TYPE_SDMA); | 949 | AMDGPU_SDMA_IRQ_TRAP1); |
967 | if (r) | 950 | if (r) |
968 | return r; | 951 | return r; |
969 | } | 952 | } |
@@ -1207,7 +1190,7 @@ static int cik_sdma_set_powergating_state(void *handle, | |||
1207 | return 0; | 1190 | return 0; |
1208 | } | 1191 | } |
1209 | 1192 | ||
1210 | const struct amd_ip_funcs cik_sdma_ip_funcs = { | 1193 | static const struct amd_ip_funcs cik_sdma_ip_funcs = { |
1211 | .name = "cik_sdma", | 1194 | .name = "cik_sdma", |
1212 | .early_init = cik_sdma_early_init, | 1195 | .early_init = cik_sdma_early_init, |
1213 | .late_init = NULL, | 1196 | .late_init = NULL, |
@@ -1225,10 +1208,19 @@ const struct amd_ip_funcs cik_sdma_ip_funcs = { | |||
1225 | }; | 1208 | }; |
1226 | 1209 | ||
1227 | static const struct amdgpu_ring_funcs cik_sdma_ring_funcs = { | 1210 | static const struct amdgpu_ring_funcs cik_sdma_ring_funcs = { |
1211 | .type = AMDGPU_RING_TYPE_SDMA, | ||
1212 | .align_mask = 0xf, | ||
1213 | .nop = SDMA_PACKET(SDMA_OPCODE_NOP, 0, 0), | ||
1228 | .get_rptr = cik_sdma_ring_get_rptr, | 1214 | .get_rptr = cik_sdma_ring_get_rptr, |
1229 | .get_wptr = cik_sdma_ring_get_wptr, | 1215 | .get_wptr = cik_sdma_ring_get_wptr, |
1230 | .set_wptr = cik_sdma_ring_set_wptr, | 1216 | .set_wptr = cik_sdma_ring_set_wptr, |
1231 | .parse_cs = NULL, | 1217 | .emit_frame_size = |
1218 | 6 + /* cik_sdma_ring_emit_hdp_flush */ | ||
1219 | 3 + /* cik_sdma_ring_emit_hdp_invalidate */ | ||
1220 | 6 + /* cik_sdma_ring_emit_pipeline_sync */ | ||
1221 | 12 + /* cik_sdma_ring_emit_vm_flush */ | ||
1222 | 9 + 9 + 9, /* cik_sdma_ring_emit_fence x3 for user fence, vm fence */ | ||
1223 | .emit_ib_size = 7 + 4, /* cik_sdma_ring_emit_ib */ | ||
1232 | .emit_ib = cik_sdma_ring_emit_ib, | 1224 | .emit_ib = cik_sdma_ring_emit_ib, |
1233 | .emit_fence = cik_sdma_ring_emit_fence, | 1225 | .emit_fence = cik_sdma_ring_emit_fence, |
1234 | .emit_pipeline_sync = cik_sdma_ring_emit_pipeline_sync, | 1226 | .emit_pipeline_sync = cik_sdma_ring_emit_pipeline_sync, |
@@ -1239,8 +1231,6 @@ static const struct amdgpu_ring_funcs cik_sdma_ring_funcs = { | |||
1239 | .test_ib = cik_sdma_ring_test_ib, | 1231 | .test_ib = cik_sdma_ring_test_ib, |
1240 | .insert_nop = cik_sdma_ring_insert_nop, | 1232 | .insert_nop = cik_sdma_ring_insert_nop, |
1241 | .pad_ib = cik_sdma_ring_pad_ib, | 1233 | .pad_ib = cik_sdma_ring_pad_ib, |
1242 | .get_emit_ib_size = cik_sdma_ring_get_emit_ib_size, | ||
1243 | .get_dma_frame_size = cik_sdma_ring_get_dma_frame_size, | ||
1244 | }; | 1234 | }; |
1245 | 1235 | ||
1246 | static void cik_sdma_set_ring_funcs(struct amdgpu_device *adev) | 1236 | static void cik_sdma_set_ring_funcs(struct amdgpu_device *adev) |
@@ -1352,3 +1342,12 @@ static void cik_sdma_set_vm_pte_funcs(struct amdgpu_device *adev) | |||
1352 | adev->vm_manager.vm_pte_num_rings = adev->sdma.num_instances; | 1342 | adev->vm_manager.vm_pte_num_rings = adev->sdma.num_instances; |
1353 | } | 1343 | } |
1354 | } | 1344 | } |
1345 | |||
1346 | const struct amdgpu_ip_block_version cik_sdma_ip_block = | ||
1347 | { | ||
1348 | .type = AMD_IP_BLOCK_TYPE_SDMA, | ||
1349 | .major = 2, | ||
1350 | .minor = 0, | ||
1351 | .rev = 0, | ||
1352 | .funcs = &cik_sdma_ip_funcs, | ||
1353 | }; | ||
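The cik_sdma.c hunks above fold per-ring constants into amdgpu_ring_funcs: the ring type, alignment mask, and NOP packet move out of amdgpu_ring_init()'s argument list, and the get_emit_ib_size()/get_dma_frame_size() callbacks, which only ever returned constants, become plain emit_ib_size/emit_frame_size fields. A small sketch of the resulting shape, with simplified stand-in types and placeholder constant values.

#include <stdio.h>

struct ring_funcs {
	unsigned int type;
	unsigned int align_mask;
	unsigned int nop;		/* NOP packet used for padding */
	unsigned int emit_frame_size;	/* worst-case dwords per frame */
	unsigned int emit_ib_size;	/* dwords per indirect-buffer emit */
};

/* constants mirroring the cik_sdma values in the hunk above */
static const struct ring_funcs sdma_ring_funcs = {
	.type = 1,		/* stand-in for AMDGPU_RING_TYPE_SDMA */
	.align_mask = 0xf,
	.nop = 0,		/* stand-in for the SDMA NOP packet */
	.emit_frame_size = 6 + 3 + 6 + 12 + 9 + 9 + 9,
	.emit_ib_size = 7 + 4,
};

int main(void)
{
	/* callers read the constants directly instead of calling a hook */
	printf("frame size: %u dwords, ib size: %u dwords\n",
	       sdma_ring_funcs.emit_frame_size,
	       sdma_ring_funcs.emit_ib_size);
	return 0;
}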
diff --git a/drivers/gpu/drm/amd/amdgpu/cik_sdma.h b/drivers/gpu/drm/amd/amdgpu/cik_sdma.h index 027727c677b8..a4a8fe01410b 100644 --- a/drivers/gpu/drm/amd/amdgpu/cik_sdma.h +++ b/drivers/gpu/drm/amd/amdgpu/cik_sdma.h | |||
@@ -24,6 +24,6 @@ | |||
24 | #ifndef __CIK_SDMA_H__ | 24 | #ifndef __CIK_SDMA_H__ |
25 | #define __CIK_SDMA_H__ | 25 | #define __CIK_SDMA_H__ |
26 | 26 | ||
27 | extern const struct amd_ip_funcs cik_sdma_ip_funcs; | 27 | extern const struct amdgpu_ip_block_version cik_sdma_ip_block; |
28 | 28 | ||
29 | #endif | 29 | #endif |
diff --git a/drivers/gpu/drm/amd/amdgpu/cikd.h b/drivers/gpu/drm/amd/amdgpu/cikd.h index 8659852aea9e..6cbd913fd12e 100644 --- a/drivers/gpu/drm/amd/amdgpu/cikd.h +++ b/drivers/gpu/drm/amd/amdgpu/cikd.h | |||
@@ -43,6 +43,14 @@ | |||
43 | #define CRTC4_REGISTER_OFFSET (0x477c - 0x1b7c) | 43 | #define CRTC4_REGISTER_OFFSET (0x477c - 0x1b7c) |
44 | #define CRTC5_REGISTER_OFFSET (0x4a7c - 0x1b7c) | 44 | #define CRTC5_REGISTER_OFFSET (0x4a7c - 0x1b7c) |
45 | 45 | ||
46 | /* hpd instance offsets */ | ||
47 | #define HPD0_REGISTER_OFFSET (0x1807 - 0x1807) | ||
48 | #define HPD1_REGISTER_OFFSET (0x180a - 0x1807) | ||
49 | #define HPD2_REGISTER_OFFSET (0x180d - 0x1807) | ||
50 | #define HPD3_REGISTER_OFFSET (0x1810 - 0x1807) | ||
51 | #define HPD4_REGISTER_OFFSET (0x1813 - 0x1807) | ||
52 | #define HPD5_REGISTER_OFFSET (0x1816 - 0x1807) | ||
53 | |||
46 | #define BONAIRE_GB_ADDR_CONFIG_GOLDEN 0x12010001 | 54 | #define BONAIRE_GB_ADDR_CONFIG_GOLDEN 0x12010001 |
47 | #define HAWAII_GB_ADDR_CONFIG_GOLDEN 0x12011003 | 55 | #define HAWAII_GB_ADDR_CONFIG_GOLDEN 0x12011003 |
48 | 56 | ||
diff --git a/drivers/gpu/drm/amd/amdgpu/cz_dpm.c b/drivers/gpu/drm/amd/amdgpu/cz_dpm.c index 3c082e143730..352b5fad5a06 100644 --- a/drivers/gpu/drm/amd/amdgpu/cz_dpm.c +++ b/drivers/gpu/drm/amd/amdgpu/cz_dpm.c | |||
@@ -1250,7 +1250,8 @@ static void cz_update_current_ps(struct amdgpu_device *adev, | |||
1250 | 1250 | ||
1251 | pi->current_ps = *ps; | 1251 | pi->current_ps = *ps; |
1252 | pi->current_rps = *rps; | 1252 | pi->current_rps = *rps; |
1253 | pi->current_rps.ps_priv = ps; | 1253 | pi->current_rps.ps_priv = &pi->current_ps; |
1254 | adev->pm.dpm.current_ps = &pi->current_rps; | ||
1254 | 1255 | ||
1255 | } | 1256 | } |
1256 | 1257 | ||
@@ -1262,7 +1263,8 @@ static void cz_update_requested_ps(struct amdgpu_device *adev, | |||
1262 | 1263 | ||
1263 | pi->requested_ps = *ps; | 1264 | pi->requested_ps = *ps; |
1264 | pi->requested_rps = *rps; | 1265 | pi->requested_rps = *rps; |
1265 | pi->requested_rps.ps_priv = ps; | 1266 | pi->requested_rps.ps_priv = &pi->requested_ps; |
1267 | adev->pm.dpm.requested_ps = &pi->requested_rps; | ||
1266 | 1268 | ||
1267 | } | 1269 | } |
1268 | 1270 | ||
@@ -2257,6 +2259,18 @@ static void cz_dpm_powergate_vce(struct amdgpu_device *adev, bool gate) | |||
2257 | } | 2259 | } |
2258 | } | 2260 | } |
2259 | 2261 | ||
2262 | static int cz_check_state_equal(struct amdgpu_device *adev, | ||
2263 | struct amdgpu_ps *cps, | ||
2264 | struct amdgpu_ps *rps, | ||
2265 | bool *equal) | ||
2266 | { | ||
2267 | if (equal == NULL) | ||
2268 | return -EINVAL; | ||
2269 | |||
2270 | *equal = false; | ||
2271 | return 0; | ||
2272 | } | ||
2273 | |||
2260 | const struct amd_ip_funcs cz_dpm_ip_funcs = { | 2274 | const struct amd_ip_funcs cz_dpm_ip_funcs = { |
2261 | .name = "cz_dpm", | 2275 | .name = "cz_dpm", |
2262 | .early_init = cz_dpm_early_init, | 2276 | .early_init = cz_dpm_early_init, |
@@ -2289,6 +2303,7 @@ static const struct amdgpu_dpm_funcs cz_dpm_funcs = { | |||
2289 | .vblank_too_short = NULL, | 2303 | .vblank_too_short = NULL, |
2290 | .powergate_uvd = cz_dpm_powergate_uvd, | 2304 | .powergate_uvd = cz_dpm_powergate_uvd, |
2291 | .powergate_vce = cz_dpm_powergate_vce, | 2305 | .powergate_vce = cz_dpm_powergate_vce, |
2306 | .check_state_equal = cz_check_state_equal, | ||
2292 | }; | 2307 | }; |
2293 | 2308 | ||
2294 | static void cz_dpm_set_funcs(struct amdgpu_device *adev) | 2309 | static void cz_dpm_set_funcs(struct amdgpu_device *adev) |
@@ -2296,3 +2311,12 @@ static void cz_dpm_set_funcs(struct amdgpu_device *adev) | |||
2296 | if (NULL == adev->pm.funcs) | 2311 | if (NULL == adev->pm.funcs) |
2297 | adev->pm.funcs = &cz_dpm_funcs; | 2312 | adev->pm.funcs = &cz_dpm_funcs; |
2298 | } | 2313 | } |
2314 | |||
2315 | const struct amdgpu_ip_block_version cz_dpm_ip_block = | ||
2316 | { | ||
2317 | .type = AMD_IP_BLOCK_TYPE_SMC, | ||
2318 | .major = 8, | ||
2319 | .minor = 0, | ||
2320 | .rev = 0, | ||
2321 | .funcs = &cz_dpm_ip_funcs, | ||
2322 | }; | ||
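
cz_check_state_equal() always reports *equal = false, so the DPM core performs a full state transition on every request; the NULL check merely guards the out-parameter. A sketch of how such a hook is consumed on the caller side (the caller logic and the set_power_state name are assumptions, not part of this diff):

	bool equal = false;

	if (adev->pm.funcs->check_state_equal)
		adev->pm.funcs->check_state_equal(adev, cps, rps, &equal);

	if (!equal)
		amdgpu_dpm_set_power_state(adev);	/* full reprogram */
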
diff --git a/drivers/gpu/drm/amd/amdgpu/cz_ih.c b/drivers/gpu/drm/amd/amdgpu/cz_ih.c index 3d23a70b6432..fe7cbb24da7b 100644 --- a/drivers/gpu/drm/amd/amdgpu/cz_ih.c +++ b/drivers/gpu/drm/amd/amdgpu/cz_ih.c | |||
@@ -394,7 +394,7 @@ static int cz_ih_set_powergating_state(void *handle, | |||
394 | return 0; | 394 | return 0; |
395 | } | 395 | } |
396 | 396 | ||
397 | const struct amd_ip_funcs cz_ih_ip_funcs = { | 397 | static const struct amd_ip_funcs cz_ih_ip_funcs = { |
398 | .name = "cz_ih", | 398 | .name = "cz_ih", |
399 | .early_init = cz_ih_early_init, | 399 | .early_init = cz_ih_early_init, |
400 | .late_init = NULL, | 400 | .late_init = NULL, |
@@ -423,3 +423,11 @@ static void cz_ih_set_interrupt_funcs(struct amdgpu_device *adev) | |||
423 | adev->irq.ih_funcs = &cz_ih_funcs; | 423 | adev->irq.ih_funcs = &cz_ih_funcs; |
424 | } | 424 | } |
425 | 425 | ||
426 | const struct amdgpu_ip_block_version cz_ih_ip_block = | ||
427 | { | ||
428 | .type = AMD_IP_BLOCK_TYPE_IH, | ||
429 | .major = 3, | ||
430 | .minor = 0, | ||
431 | .rev = 0, | ||
432 | .funcs = &cz_ih_ip_funcs, | ||
433 | }; | ||
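
Making cz_ih_ip_funcs static is the flip side of the new export: once the version struct is the only public symbol, consumers register whole blocks rather than reaching into function tables. Registration presumably looks like the following, using the amdgpu_ip_block_add() helper this series introduces elsewhere (treat the exact call as an assumption here):

	amdgpu_ip_block_add(adev, &cz_ih_ip_block);
	amdgpu_ip_block_add(adev, &cz_dpm_ip_block);
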
diff --git a/drivers/gpu/drm/amd/amdgpu/cz_ih.h b/drivers/gpu/drm/amd/amdgpu/cz_ih.h index fc4057a2ecb9..14be7753221b 100644 --- a/drivers/gpu/drm/amd/amdgpu/cz_ih.h +++ b/drivers/gpu/drm/amd/amdgpu/cz_ih.h | |||
@@ -24,6 +24,6 @@ | |||
24 | #ifndef __CZ_IH_H__ | 24 | #ifndef __CZ_IH_H__ |
25 | #define __CZ_IH_H__ | 25 | #define __CZ_IH_H__ |
26 | 26 | ||
27 | extern const struct amd_ip_funcs cz_ih_ip_funcs; | 27 | extern const struct amdgpu_ip_block_version cz_ih_ip_block; |
28 | 28 | ||
29 | #endif /* __CZ_IH_H__ */ | 29 | #endif /* __CZ_IH_H__ */ |
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c index 4108c686aa7c..199d3f7235d6 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c | |||
@@ -31,6 +31,7 @@ | |||
31 | #include "atombios_encoders.h" | 31 | #include "atombios_encoders.h" |
32 | #include "amdgpu_pll.h" | 32 | #include "amdgpu_pll.h" |
33 | #include "amdgpu_connectors.h" | 33 | #include "amdgpu_connectors.h" |
34 | #include "dce_v10_0.h" | ||
34 | 35 | ||
35 | #include "dce/dce_10_0_d.h" | 36 | #include "dce/dce_10_0_d.h" |
36 | #include "dce/dce_10_0_sh_mask.h" | 37 | #include "dce/dce_10_0_sh_mask.h" |
@@ -330,33 +331,12 @@ static int dce_v10_0_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc, | |||
330 | static bool dce_v10_0_hpd_sense(struct amdgpu_device *adev, | 331 | static bool dce_v10_0_hpd_sense(struct amdgpu_device *adev, |
331 | enum amdgpu_hpd_id hpd) | 332 | enum amdgpu_hpd_id hpd) |
332 | { | 333 | { |
333 | int idx; | ||
334 | bool connected = false; | 334 | bool connected = false; |
335 | 335 | ||
336 | switch (hpd) { | 336 | if (hpd >= adev->mode_info.num_hpd) |
337 | case AMDGPU_HPD_1: | ||
338 | idx = 0; | ||
339 | break; | ||
340 | case AMDGPU_HPD_2: | ||
341 | idx = 1; | ||
342 | break; | ||
343 | case AMDGPU_HPD_3: | ||
344 | idx = 2; | ||
345 | break; | ||
346 | case AMDGPU_HPD_4: | ||
347 | idx = 3; | ||
348 | break; | ||
349 | case AMDGPU_HPD_5: | ||
350 | idx = 4; | ||
351 | break; | ||
352 | case AMDGPU_HPD_6: | ||
353 | idx = 5; | ||
354 | break; | ||
355 | default: | ||
356 | return connected; | 337 | return connected; |
357 | } | ||
358 | 338 | ||
359 | if (RREG32(mmDC_HPD_INT_STATUS + hpd_offsets[idx]) & | 339 | if (RREG32(mmDC_HPD_INT_STATUS + hpd_offsets[hpd]) & |
360 | DC_HPD_INT_STATUS__DC_HPD_SENSE_MASK) | 340 | DC_HPD_INT_STATUS__DC_HPD_SENSE_MASK) |
361 | connected = true; | 341 | connected = true; |
362 | 342 | ||
@@ -376,37 +356,16 @@ static void dce_v10_0_hpd_set_polarity(struct amdgpu_device *adev, | |||
376 | { | 356 | { |
377 | u32 tmp; | 357 | u32 tmp; |
378 | bool connected = dce_v10_0_hpd_sense(adev, hpd); | 358 | bool connected = dce_v10_0_hpd_sense(adev, hpd); |
379 | int idx; | ||
380 | 359 | ||
381 | switch (hpd) { | 360 | if (hpd >= adev->mode_info.num_hpd) |
382 | case AMDGPU_HPD_1: | ||
383 | idx = 0; | ||
384 | break; | ||
385 | case AMDGPU_HPD_2: | ||
386 | idx = 1; | ||
387 | break; | ||
388 | case AMDGPU_HPD_3: | ||
389 | idx = 2; | ||
390 | break; | ||
391 | case AMDGPU_HPD_4: | ||
392 | idx = 3; | ||
393 | break; | ||
394 | case AMDGPU_HPD_5: | ||
395 | idx = 4; | ||
396 | break; | ||
397 | case AMDGPU_HPD_6: | ||
398 | idx = 5; | ||
399 | break; | ||
400 | default: | ||
401 | return; | 361 | return; |
402 | } | ||
403 | 362 | ||
404 | tmp = RREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[idx]); | 363 | tmp = RREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[hpd]); |
405 | if (connected) | 364 | if (connected) |
406 | tmp = REG_SET_FIELD(tmp, DC_HPD_INT_CONTROL, DC_HPD_INT_POLARITY, 0); | 365 | tmp = REG_SET_FIELD(tmp, DC_HPD_INT_CONTROL, DC_HPD_INT_POLARITY, 0); |
407 | else | 366 | else |
408 | tmp = REG_SET_FIELD(tmp, DC_HPD_INT_CONTROL, DC_HPD_INT_POLARITY, 1); | 367 | tmp = REG_SET_FIELD(tmp, DC_HPD_INT_CONTROL, DC_HPD_INT_POLARITY, 1); |
409 | WREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[idx], tmp); | 368 | WREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[hpd], tmp); |
410 | } | 369 | } |
411 | 370 | ||
412 | /** | 371 | /** |
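
REG_SET_FIELD() hides the read-modify-write bit-shuffle behind the generated __SHIFT/_MASK constants, so the polarity flip above stays three lines no matter which HPD instance is addressed. Roughly how the macro behaves, reconstructed from its usage here (a sketch, not the kernel's exact definition):

	#define REG_SET_FIELD(orig, reg, field, val)                        \
		(((orig) & ~reg##__##field##_MASK) |                        \
		 (((val) << reg##__##field##__SHIFT) & reg##__##field##_MASK))
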
@@ -422,33 +381,12 @@ static void dce_v10_0_hpd_init(struct amdgpu_device *adev) | |||
422 | struct drm_device *dev = adev->ddev; | 381 | struct drm_device *dev = adev->ddev; |
423 | struct drm_connector *connector; | 382 | struct drm_connector *connector; |
424 | u32 tmp; | 383 | u32 tmp; |
425 | int idx; | ||
426 | 384 | ||
427 | list_for_each_entry(connector, &dev->mode_config.connector_list, head) { | 385 | list_for_each_entry(connector, &dev->mode_config.connector_list, head) { |
428 | struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector); | 386 | struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector); |
429 | 387 | ||
430 | switch (amdgpu_connector->hpd.hpd) { | 388 | if (amdgpu_connector->hpd.hpd >= adev->mode_info.num_hpd) |
431 | case AMDGPU_HPD_1: | ||
432 | idx = 0; | ||
433 | break; | ||
434 | case AMDGPU_HPD_2: | ||
435 | idx = 1; | ||
436 | break; | ||
437 | case AMDGPU_HPD_3: | ||
438 | idx = 2; | ||
439 | break; | ||
440 | case AMDGPU_HPD_4: | ||
441 | idx = 3; | ||
442 | break; | ||
443 | case AMDGPU_HPD_5: | ||
444 | idx = 4; | ||
445 | break; | ||
446 | case AMDGPU_HPD_6: | ||
447 | idx = 5; | ||
448 | break; | ||
449 | default: | ||
450 | continue; | 389 | continue; |
451 | } | ||
452 | 390 | ||
453 | if (connector->connector_type == DRM_MODE_CONNECTOR_eDP || | 391 | if (connector->connector_type == DRM_MODE_CONNECTOR_eDP || |
454 | connector->connector_type == DRM_MODE_CONNECTOR_LVDS) { | 392 | connector->connector_type == DRM_MODE_CONNECTOR_LVDS) { |
@@ -457,24 +395,24 @@ static void dce_v10_0_hpd_init(struct amdgpu_device *adev) | |||
457 | * https://bugzilla.redhat.com/show_bug.cgi?id=726143 | 395 | * https://bugzilla.redhat.com/show_bug.cgi?id=726143 |
458 | * also avoid interrupt storms during dpms. | 396 | * also avoid interrupt storms during dpms. |
459 | */ | 397 | */ |
460 | tmp = RREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[idx]); | 398 | tmp = RREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd]); |
461 | tmp = REG_SET_FIELD(tmp, DC_HPD_INT_CONTROL, DC_HPD_INT_EN, 0); | 399 | tmp = REG_SET_FIELD(tmp, DC_HPD_INT_CONTROL, DC_HPD_INT_EN, 0); |
462 | WREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[idx], tmp); | 400 | WREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd], tmp); |
463 | continue; | 401 | continue; |
464 | } | 402 | } |
465 | 403 | ||
466 | tmp = RREG32(mmDC_HPD_CONTROL + hpd_offsets[idx]); | 404 | tmp = RREG32(mmDC_HPD_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd]); |
467 | tmp = REG_SET_FIELD(tmp, DC_HPD_CONTROL, DC_HPD_EN, 1); | 405 | tmp = REG_SET_FIELD(tmp, DC_HPD_CONTROL, DC_HPD_EN, 1); |
468 | WREG32(mmDC_HPD_CONTROL + hpd_offsets[idx], tmp); | 406 | WREG32(mmDC_HPD_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd], tmp); |
469 | 407 | ||
470 | tmp = RREG32(mmDC_HPD_TOGGLE_FILT_CNTL + hpd_offsets[idx]); | 408 | tmp = RREG32(mmDC_HPD_TOGGLE_FILT_CNTL + hpd_offsets[amdgpu_connector->hpd.hpd]); |
471 | tmp = REG_SET_FIELD(tmp, DC_HPD_TOGGLE_FILT_CNTL, | 409 | tmp = REG_SET_FIELD(tmp, DC_HPD_TOGGLE_FILT_CNTL, |
472 | DC_HPD_CONNECT_INT_DELAY, | 410 | DC_HPD_CONNECT_INT_DELAY, |
473 | AMDGPU_HPD_CONNECT_INT_DELAY_IN_MS); | 411 | AMDGPU_HPD_CONNECT_INT_DELAY_IN_MS); |
474 | tmp = REG_SET_FIELD(tmp, DC_HPD_TOGGLE_FILT_CNTL, | 412 | tmp = REG_SET_FIELD(tmp, DC_HPD_TOGGLE_FILT_CNTL, |
475 | DC_HPD_DISCONNECT_INT_DELAY, | 413 | DC_HPD_DISCONNECT_INT_DELAY, |
476 | AMDGPU_HPD_DISCONNECT_INT_DELAY_IN_MS); | 414 | AMDGPU_HPD_DISCONNECT_INT_DELAY_IN_MS); |
477 | WREG32(mmDC_HPD_TOGGLE_FILT_CNTL + hpd_offsets[idx], tmp); | 415 | WREG32(mmDC_HPD_TOGGLE_FILT_CNTL + hpd_offsets[amdgpu_connector->hpd.hpd], tmp); |
478 | 416 | ||
479 | dce_v10_0_hpd_set_polarity(adev, amdgpu_connector->hpd.hpd); | 417 | dce_v10_0_hpd_set_polarity(adev, amdgpu_connector->hpd.hpd); |
480 | amdgpu_irq_get(adev, &adev->hpd_irq, | 418 | amdgpu_irq_get(adev, &adev->hpd_irq, |
@@ -495,37 +433,16 @@ static void dce_v10_0_hpd_fini(struct amdgpu_device *adev) | |||
495 | struct drm_device *dev = adev->ddev; | 433 | struct drm_device *dev = adev->ddev; |
496 | struct drm_connector *connector; | 434 | struct drm_connector *connector; |
497 | u32 tmp; | 435 | u32 tmp; |
498 | int idx; | ||
499 | 436 | ||
500 | list_for_each_entry(connector, &dev->mode_config.connector_list, head) { | 437 | list_for_each_entry(connector, &dev->mode_config.connector_list, head) { |
501 | struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector); | 438 | struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector); |
502 | 439 | ||
503 | switch (amdgpu_connector->hpd.hpd) { | 440 | if (amdgpu_connector->hpd.hpd >= adev->mode_info.num_hpd) |
504 | case AMDGPU_HPD_1: | ||
505 | idx = 0; | ||
506 | break; | ||
507 | case AMDGPU_HPD_2: | ||
508 | idx = 1; | ||
509 | break; | ||
510 | case AMDGPU_HPD_3: | ||
511 | idx = 2; | ||
512 | break; | ||
513 | case AMDGPU_HPD_4: | ||
514 | idx = 3; | ||
515 | break; | ||
516 | case AMDGPU_HPD_5: | ||
517 | idx = 4; | ||
518 | break; | ||
519 | case AMDGPU_HPD_6: | ||
520 | idx = 5; | ||
521 | break; | ||
522 | default: | ||
523 | continue; | 441 | continue; |
524 | } | ||
525 | 442 | ||
526 | tmp = RREG32(mmDC_HPD_CONTROL + hpd_offsets[idx]); | 443 | tmp = RREG32(mmDC_HPD_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd]); |
527 | tmp = REG_SET_FIELD(tmp, DC_HPD_CONTROL, DC_HPD_EN, 0); | 444 | tmp = REG_SET_FIELD(tmp, DC_HPD_CONTROL, DC_HPD_EN, 0); |
528 | WREG32(mmDC_HPD_CONTROL + hpd_offsets[idx], tmp); | 445 | WREG32(mmDC_HPD_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd], tmp); |
529 | 446 | ||
530 | amdgpu_irq_put(adev, &adev->hpd_irq, | 447 | amdgpu_irq_put(adev, &adev->hpd_irq, |
531 | amdgpu_connector->hpd.hpd); | 448 | amdgpu_connector->hpd.hpd); |
@@ -3554,7 +3471,7 @@ static int dce_v10_0_set_powergating_state(void *handle, | |||
3554 | return 0; | 3471 | return 0; |
3555 | } | 3472 | } |
3556 | 3473 | ||
3557 | const struct amd_ip_funcs dce_v10_0_ip_funcs = { | 3474 | static const struct amd_ip_funcs dce_v10_0_ip_funcs = { |
3558 | .name = "dce_v10_0", | 3475 | .name = "dce_v10_0", |
3559 | .early_init = dce_v10_0_early_init, | 3476 | .early_init = dce_v10_0_early_init, |
3560 | .late_init = NULL, | 3477 | .late_init = NULL, |
@@ -3885,3 +3802,21 @@ static void dce_v10_0_set_irq_funcs(struct amdgpu_device *adev) | |||
3885 | adev->hpd_irq.num_types = AMDGPU_HPD_LAST; | 3802 | adev->hpd_irq.num_types = AMDGPU_HPD_LAST; |
3886 | adev->hpd_irq.funcs = &dce_v10_0_hpd_irq_funcs; | 3803 | adev->hpd_irq.funcs = &dce_v10_0_hpd_irq_funcs; |
3887 | } | 3804 | } |
3805 | |||
3806 | const struct amdgpu_ip_block_version dce_v10_0_ip_block = | ||
3807 | { | ||
3808 | .type = AMD_IP_BLOCK_TYPE_DCE, | ||
3809 | .major = 10, | ||
3810 | .minor = 0, | ||
3811 | .rev = 0, | ||
3812 | .funcs = &dce_v10_0_ip_funcs, | ||
3813 | }; | ||
3814 | |||
3815 | const struct amdgpu_ip_block_version dce_v10_1_ip_block = | ||
3816 | { | ||
3817 | .type = AMD_IP_BLOCK_TYPE_DCE, | ||
3818 | .major = 10, | ||
3819 | .minor = 1, | ||
3820 | .rev = 0, | ||
3821 | .funcs = &dce_v10_0_ip_funcs, | ||
3822 | }; | ||
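
DCE 10.0 and 10.1 differ only in their version stamp: both amdgpu_ip_block_version entries reference the same static function table, and the major/minor fields identify the variant. The per-ASIC wiring lives in the VI init code rather than in this file; it plausibly looks like this (the CHIP_* mapping shown is an assumption of this sketch):

	switch (adev->asic_type) {
	case CHIP_TONGA:
		amdgpu_ip_block_add(adev, &dce_v10_0_ip_block);
		break;
	case CHIP_FIJI:
		amdgpu_ip_block_add(adev, &dce_v10_1_ip_block);
		break;
	default:
		break;
	}
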
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.h b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.h index e3dc04d293e4..7a0747789f1d 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.h +++ b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.h | |||
@@ -24,7 +24,9 @@ | |||
24 | #ifndef __DCE_V10_0_H__ | 24 | #ifndef __DCE_V10_0_H__ |
25 | #define __DCE_V10_0_H__ | 25 | #define __DCE_V10_0_H__ |
26 | 26 | ||
27 | extern const struct amd_ip_funcs dce_v10_0_ip_funcs; | 27 | |
28 | extern const struct amdgpu_ip_block_version dce_v10_0_ip_block; | ||
29 | extern const struct amdgpu_ip_block_version dce_v10_1_ip_block; | ||
28 | 30 | ||
29 | void dce_v10_0_disable_dce(struct amdgpu_device *adev); | 31 | void dce_v10_0_disable_dce(struct amdgpu_device *adev); |
30 | 32 | ||
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c index f264b8f17ad1..ecd000e35981 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c | |||
@@ -31,6 +31,7 @@ | |||
31 | #include "atombios_encoders.h" | 31 | #include "atombios_encoders.h" |
32 | #include "amdgpu_pll.h" | 32 | #include "amdgpu_pll.h" |
33 | #include "amdgpu_connectors.h" | 33 | #include "amdgpu_connectors.h" |
34 | #include "dce_v11_0.h" | ||
34 | 35 | ||
35 | #include "dce/dce_11_0_d.h" | 36 | #include "dce/dce_11_0_d.h" |
36 | #include "dce/dce_11_0_sh_mask.h" | 37 | #include "dce/dce_11_0_sh_mask.h" |
@@ -346,33 +347,12 @@ static int dce_v11_0_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc, | |||
346 | static bool dce_v11_0_hpd_sense(struct amdgpu_device *adev, | 347 | static bool dce_v11_0_hpd_sense(struct amdgpu_device *adev, |
347 | enum amdgpu_hpd_id hpd) | 348 | enum amdgpu_hpd_id hpd) |
348 | { | 349 | { |
349 | int idx; | ||
350 | bool connected = false; | 350 | bool connected = false; |
351 | 351 | ||
352 | switch (hpd) { | 352 | if (hpd >= adev->mode_info.num_hpd) |
353 | case AMDGPU_HPD_1: | ||
354 | idx = 0; | ||
355 | break; | ||
356 | case AMDGPU_HPD_2: | ||
357 | idx = 1; | ||
358 | break; | ||
359 | case AMDGPU_HPD_3: | ||
360 | idx = 2; | ||
361 | break; | ||
362 | case AMDGPU_HPD_4: | ||
363 | idx = 3; | ||
364 | break; | ||
365 | case AMDGPU_HPD_5: | ||
366 | idx = 4; | ||
367 | break; | ||
368 | case AMDGPU_HPD_6: | ||
369 | idx = 5; | ||
370 | break; | ||
371 | default: | ||
372 | return connected; | 353 | return connected; |
373 | } | ||
374 | 354 | ||
375 | if (RREG32(mmDC_HPD_INT_STATUS + hpd_offsets[idx]) & | 355 | if (RREG32(mmDC_HPD_INT_STATUS + hpd_offsets[hpd]) & |
376 | DC_HPD_INT_STATUS__DC_HPD_SENSE_MASK) | 356 | DC_HPD_INT_STATUS__DC_HPD_SENSE_MASK) |
377 | connected = true; | 357 | connected = true; |
378 | 358 | ||
@@ -392,37 +372,16 @@ static void dce_v11_0_hpd_set_polarity(struct amdgpu_device *adev, | |||
392 | { | 372 | { |
393 | u32 tmp; | 373 | u32 tmp; |
394 | bool connected = dce_v11_0_hpd_sense(adev, hpd); | 374 | bool connected = dce_v11_0_hpd_sense(adev, hpd); |
395 | int idx; | ||
396 | 375 | ||
397 | switch (hpd) { | 376 | if (hpd >= adev->mode_info.num_hpd) |
398 | case AMDGPU_HPD_1: | ||
399 | idx = 0; | ||
400 | break; | ||
401 | case AMDGPU_HPD_2: | ||
402 | idx = 1; | ||
403 | break; | ||
404 | case AMDGPU_HPD_3: | ||
405 | idx = 2; | ||
406 | break; | ||
407 | case AMDGPU_HPD_4: | ||
408 | idx = 3; | ||
409 | break; | ||
410 | case AMDGPU_HPD_5: | ||
411 | idx = 4; | ||
412 | break; | ||
413 | case AMDGPU_HPD_6: | ||
414 | idx = 5; | ||
415 | break; | ||
416 | default: | ||
417 | return; | 377 | return; |
418 | } | ||
419 | 378 | ||
420 | tmp = RREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[idx]); | 379 | tmp = RREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[hpd]); |
421 | if (connected) | 380 | if (connected) |
422 | tmp = REG_SET_FIELD(tmp, DC_HPD_INT_CONTROL, DC_HPD_INT_POLARITY, 0); | 381 | tmp = REG_SET_FIELD(tmp, DC_HPD_INT_CONTROL, DC_HPD_INT_POLARITY, 0); |
423 | else | 382 | else |
424 | tmp = REG_SET_FIELD(tmp, DC_HPD_INT_CONTROL, DC_HPD_INT_POLARITY, 1); | 383 | tmp = REG_SET_FIELD(tmp, DC_HPD_INT_CONTROL, DC_HPD_INT_POLARITY, 1); |
425 | WREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[idx], tmp); | 384 | WREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[hpd], tmp); |
426 | } | 385 | } |
427 | 386 | ||
428 | /** | 387 | /** |
@@ -438,33 +397,12 @@ static void dce_v11_0_hpd_init(struct amdgpu_device *adev) | |||
438 | struct drm_device *dev = adev->ddev; | 397 | struct drm_device *dev = adev->ddev; |
439 | struct drm_connector *connector; | 398 | struct drm_connector *connector; |
440 | u32 tmp; | 399 | u32 tmp; |
441 | int idx; | ||
442 | 400 | ||
443 | list_for_each_entry(connector, &dev->mode_config.connector_list, head) { | 401 | list_for_each_entry(connector, &dev->mode_config.connector_list, head) { |
444 | struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector); | 402 | struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector); |
445 | 403 | ||
446 | switch (amdgpu_connector->hpd.hpd) { | 404 | if (amdgpu_connector->hpd.hpd >= adev->mode_info.num_hpd) |
447 | case AMDGPU_HPD_1: | ||
448 | idx = 0; | ||
449 | break; | ||
450 | case AMDGPU_HPD_2: | ||
451 | idx = 1; | ||
452 | break; | ||
453 | case AMDGPU_HPD_3: | ||
454 | idx = 2; | ||
455 | break; | ||
456 | case AMDGPU_HPD_4: | ||
457 | idx = 3; | ||
458 | break; | ||
459 | case AMDGPU_HPD_5: | ||
460 | idx = 4; | ||
461 | break; | ||
462 | case AMDGPU_HPD_6: | ||
463 | idx = 5; | ||
464 | break; | ||
465 | default: | ||
466 | continue; | 405 | continue; |
467 | } | ||
468 | 406 | ||
469 | if (connector->connector_type == DRM_MODE_CONNECTOR_eDP || | 407 | if (connector->connector_type == DRM_MODE_CONNECTOR_eDP || |
470 | connector->connector_type == DRM_MODE_CONNECTOR_LVDS) { | 408 | connector->connector_type == DRM_MODE_CONNECTOR_LVDS) { |
@@ -473,24 +411,24 @@ static void dce_v11_0_hpd_init(struct amdgpu_device *adev) | |||
473 | * https://bugzilla.redhat.com/show_bug.cgi?id=726143 | 411 | * https://bugzilla.redhat.com/show_bug.cgi?id=726143 |
474 | * also avoid interrupt storms during dpms. | 412 | * also avoid interrupt storms during dpms. |
475 | */ | 413 | */ |
476 | tmp = RREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[idx]); | 414 | tmp = RREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd]); |
477 | tmp = REG_SET_FIELD(tmp, DC_HPD_INT_CONTROL, DC_HPD_INT_EN, 0); | 415 | tmp = REG_SET_FIELD(tmp, DC_HPD_INT_CONTROL, DC_HPD_INT_EN, 0); |
478 | WREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[idx], tmp); | 416 | WREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd], tmp); |
479 | continue; | 417 | continue; |
480 | } | 418 | } |
481 | 419 | ||
482 | tmp = RREG32(mmDC_HPD_CONTROL + hpd_offsets[idx]); | 420 | tmp = RREG32(mmDC_HPD_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd]); |
483 | tmp = REG_SET_FIELD(tmp, DC_HPD_CONTROL, DC_HPD_EN, 1); | 421 | tmp = REG_SET_FIELD(tmp, DC_HPD_CONTROL, DC_HPD_EN, 1); |
484 | WREG32(mmDC_HPD_CONTROL + hpd_offsets[idx], tmp); | 422 | WREG32(mmDC_HPD_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd], tmp); |
485 | 423 | ||
486 | tmp = RREG32(mmDC_HPD_TOGGLE_FILT_CNTL + hpd_offsets[idx]); | 424 | tmp = RREG32(mmDC_HPD_TOGGLE_FILT_CNTL + hpd_offsets[amdgpu_connector->hpd.hpd]); |
487 | tmp = REG_SET_FIELD(tmp, DC_HPD_TOGGLE_FILT_CNTL, | 425 | tmp = REG_SET_FIELD(tmp, DC_HPD_TOGGLE_FILT_CNTL, |
488 | DC_HPD_CONNECT_INT_DELAY, | 426 | DC_HPD_CONNECT_INT_DELAY, |
489 | AMDGPU_HPD_CONNECT_INT_DELAY_IN_MS); | 427 | AMDGPU_HPD_CONNECT_INT_DELAY_IN_MS); |
490 | tmp = REG_SET_FIELD(tmp, DC_HPD_TOGGLE_FILT_CNTL, | 428 | tmp = REG_SET_FIELD(tmp, DC_HPD_TOGGLE_FILT_CNTL, |
491 | DC_HPD_DISCONNECT_INT_DELAY, | 429 | DC_HPD_DISCONNECT_INT_DELAY, |
492 | AMDGPU_HPD_DISCONNECT_INT_DELAY_IN_MS); | 430 | AMDGPU_HPD_DISCONNECT_INT_DELAY_IN_MS); |
493 | WREG32(mmDC_HPD_TOGGLE_FILT_CNTL + hpd_offsets[idx], tmp); | 431 | WREG32(mmDC_HPD_TOGGLE_FILT_CNTL + hpd_offsets[amdgpu_connector->hpd.hpd], tmp); |
494 | 432 | ||
495 | dce_v11_0_hpd_set_polarity(adev, amdgpu_connector->hpd.hpd); | 433 | dce_v11_0_hpd_set_polarity(adev, amdgpu_connector->hpd.hpd); |
496 | amdgpu_irq_get(adev, &adev->hpd_irq, amdgpu_connector->hpd.hpd); | 434 | amdgpu_irq_get(adev, &adev->hpd_irq, amdgpu_connector->hpd.hpd); |
@@ -510,37 +448,16 @@ static void dce_v11_0_hpd_fini(struct amdgpu_device *adev) | |||
510 | struct drm_device *dev = adev->ddev; | 448 | struct drm_device *dev = adev->ddev; |
511 | struct drm_connector *connector; | 449 | struct drm_connector *connector; |
512 | u32 tmp; | 450 | u32 tmp; |
513 | int idx; | ||
514 | 451 | ||
515 | list_for_each_entry(connector, &dev->mode_config.connector_list, head) { | 452 | list_for_each_entry(connector, &dev->mode_config.connector_list, head) { |
516 | struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector); | 453 | struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector); |
517 | 454 | ||
518 | switch (amdgpu_connector->hpd.hpd) { | 455 | if (amdgpu_connector->hpd.hpd >= adev->mode_info.num_hpd) |
519 | case AMDGPU_HPD_1: | ||
520 | idx = 0; | ||
521 | break; | ||
522 | case AMDGPU_HPD_2: | ||
523 | idx = 1; | ||
524 | break; | ||
525 | case AMDGPU_HPD_3: | ||
526 | idx = 2; | ||
527 | break; | ||
528 | case AMDGPU_HPD_4: | ||
529 | idx = 3; | ||
530 | break; | ||
531 | case AMDGPU_HPD_5: | ||
532 | idx = 4; | ||
533 | break; | ||
534 | case AMDGPU_HPD_6: | ||
535 | idx = 5; | ||
536 | break; | ||
537 | default: | ||
538 | continue; | 456 | continue; |
539 | } | ||
540 | 457 | ||
541 | tmp = RREG32(mmDC_HPD_CONTROL + hpd_offsets[idx]); | 458 | tmp = RREG32(mmDC_HPD_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd]); |
542 | tmp = REG_SET_FIELD(tmp, DC_HPD_CONTROL, DC_HPD_EN, 0); | 459 | tmp = REG_SET_FIELD(tmp, DC_HPD_CONTROL, DC_HPD_EN, 0); |
543 | WREG32(mmDC_HPD_CONTROL + hpd_offsets[idx], tmp); | 460 | WREG32(mmDC_HPD_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd], tmp); |
544 | 461 | ||
545 | amdgpu_irq_put(adev, &adev->hpd_irq, amdgpu_connector->hpd.hpd); | 462 | amdgpu_irq_put(adev, &adev->hpd_irq, amdgpu_connector->hpd.hpd); |
546 | } | 463 | } |
@@ -3611,7 +3528,7 @@ static int dce_v11_0_set_powergating_state(void *handle, | |||
3611 | return 0; | 3528 | return 0; |
3612 | } | 3529 | } |
3613 | 3530 | ||
3614 | const struct amd_ip_funcs dce_v11_0_ip_funcs = { | 3531 | static const struct amd_ip_funcs dce_v11_0_ip_funcs = { |
3615 | .name = "dce_v11_0", | 3532 | .name = "dce_v11_0", |
3616 | .early_init = dce_v11_0_early_init, | 3533 | .early_init = dce_v11_0_early_init, |
3617 | .late_init = NULL, | 3534 | .late_init = NULL, |
@@ -3941,3 +3858,21 @@ static void dce_v11_0_set_irq_funcs(struct amdgpu_device *adev) | |||
3941 | adev->hpd_irq.num_types = AMDGPU_HPD_LAST; | 3858 | adev->hpd_irq.num_types = AMDGPU_HPD_LAST; |
3942 | adev->hpd_irq.funcs = &dce_v11_0_hpd_irq_funcs; | 3859 | adev->hpd_irq.funcs = &dce_v11_0_hpd_irq_funcs; |
3943 | } | 3860 | } |
3861 | |||
3862 | const struct amdgpu_ip_block_version dce_v11_0_ip_block = | ||
3863 | { | ||
3864 | .type = AMD_IP_BLOCK_TYPE_DCE, | ||
3865 | .major = 11, | ||
3866 | .minor = 0, | ||
3867 | .rev = 0, | ||
3868 | .funcs = &dce_v11_0_ip_funcs, | ||
3869 | }; | ||
3870 | |||
3871 | const struct amdgpu_ip_block_version dce_v11_2_ip_block = | ||
3872 | { | ||
3873 | .type = AMD_IP_BLOCK_TYPE_DCE, | ||
3874 | .major = 11, | ||
3875 | .minor = 2, | ||
3876 | .rev = 0, | ||
3877 | .funcs = &dce_v11_0_ip_funcs, | ||
3878 | }; | ||
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.h b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.h index 1f58a65ba2ef..0d878ca3acba 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.h +++ b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.h | |||
@@ -24,7 +24,8 @@ | |||
24 | #ifndef __DCE_V11_0_H__ | 24 | #ifndef __DCE_V11_0_H__ |
25 | #define __DCE_V11_0_H__ | 25 | #define __DCE_V11_0_H__ |
26 | 26 | ||
27 | extern const struct amd_ip_funcs dce_v11_0_ip_funcs; | 27 | extern const struct amdgpu_ip_block_version dce_v11_0_ip_block; |
28 | extern const struct amdgpu_ip_block_version dce_v11_2_ip_block; | ||
28 | 29 | ||
29 | void dce_v11_0_disable_dce(struct amdgpu_device *adev); | 30 | void dce_v11_0_disable_dce(struct amdgpu_device *adev); |
30 | 31 | ||
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c index b948d6cb1399..44547f951d92 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c | |||
@@ -46,6 +46,16 @@ static const u32 crtc_offsets[6] = | |||
46 | SI_CRTC5_REGISTER_OFFSET | 46 | SI_CRTC5_REGISTER_OFFSET |
47 | }; | 47 | }; |
48 | 48 | ||
49 | static const u32 hpd_offsets[] = | ||
50 | { | ||
51 | DC_HPD1_INT_STATUS - DC_HPD1_INT_STATUS, | ||
52 | DC_HPD2_INT_STATUS - DC_HPD1_INT_STATUS, | ||
53 | DC_HPD3_INT_STATUS - DC_HPD1_INT_STATUS, | ||
54 | DC_HPD4_INT_STATUS - DC_HPD1_INT_STATUS, | ||
55 | DC_HPD5_INT_STATUS - DC_HPD1_INT_STATUS, | ||
56 | DC_HPD6_INT_STATUS - DC_HPD1_INT_STATUS, | ||
57 | }; | ||
58 | |||
49 | static const uint32_t dig_offsets[] = { | 59 | static const uint32_t dig_offsets[] = { |
50 | SI_CRTC0_REGISTER_OFFSET, | 60 | SI_CRTC0_REGISTER_OFFSET, |
51 | SI_CRTC1_REGISTER_OFFSET, | 61 | SI_CRTC1_REGISTER_OFFSET, |
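
On SI the table entries are deltas between per-instance INT_STATUS registers rather than named HPDn_REGISTER_OFFSET constants, but the idea is identical: each HPD instance's register file is laid out uniformly, so one delta table addresses every register class, which is why the separate hpd_int_control_offsets[] table is deleted just below. For example:

	u32 status = RREG32(DC_HPD1_INT_STATUS  + hpd_offsets[hpd]);
	u32 cntl   = RREG32(DC_HPD1_CONTROL     + hpd_offsets[hpd]);
	u32 intc   = RREG32(DC_HPD1_INT_CONTROL + hpd_offsets[hpd]);
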
@@ -94,15 +104,6 @@ static const struct { | |||
94 | .hpd = DISP_INTERRUPT_STATUS_CONTINUE5__DC_HPD6_INTERRUPT_MASK | 104 | .hpd = DISP_INTERRUPT_STATUS_CONTINUE5__DC_HPD6_INTERRUPT_MASK |
95 | } }; | 105 | } }; |
96 | 106 | ||
97 | static const uint32_t hpd_int_control_offsets[6] = { | ||
98 | DC_HPD1_INT_CONTROL, | ||
99 | DC_HPD2_INT_CONTROL, | ||
100 | DC_HPD3_INT_CONTROL, | ||
101 | DC_HPD4_INT_CONTROL, | ||
102 | DC_HPD5_INT_CONTROL, | ||
103 | DC_HPD6_INT_CONTROL, | ||
104 | }; | ||
105 | |||
106 | static u32 dce_v6_0_audio_endpt_rreg(struct amdgpu_device *adev, | 107 | static u32 dce_v6_0_audio_endpt_rreg(struct amdgpu_device *adev, |
107 | u32 block_offset, u32 reg) | 108 | u32 block_offset, u32 reg) |
108 | { | 109 | { |
@@ -257,34 +258,11 @@ static bool dce_v6_0_hpd_sense(struct amdgpu_device *adev, | |||
257 | { | 258 | { |
258 | bool connected = false; | 259 | bool connected = false; |
259 | 260 | ||
260 | switch (hpd) { | 261 | if (hpd >= adev->mode_info.num_hpd) |
261 | case AMDGPU_HPD_1: | 262 | return connected; |
262 | if (RREG32(DC_HPD1_INT_STATUS) & DC_HPDx_SENSE) | 263 | |
263 | connected = true; | 264 | if (RREG32(DC_HPD1_INT_STATUS + hpd_offsets[hpd]) & DC_HPDx_SENSE) |
264 | break; | 265 | connected = true; |
265 | case AMDGPU_HPD_2: | ||
266 | if (RREG32(DC_HPD2_INT_STATUS) & DC_HPDx_SENSE) | ||
267 | connected = true; | ||
268 | break; | ||
269 | case AMDGPU_HPD_3: | ||
270 | if (RREG32(DC_HPD3_INT_STATUS) & DC_HPDx_SENSE) | ||
271 | connected = true; | ||
272 | break; | ||
273 | case AMDGPU_HPD_4: | ||
274 | if (RREG32(DC_HPD4_INT_STATUS) & DC_HPDx_SENSE) | ||
275 | connected = true; | ||
276 | break; | ||
277 | case AMDGPU_HPD_5: | ||
278 | if (RREG32(DC_HPD5_INT_STATUS) & DC_HPDx_SENSE) | ||
279 | connected = true; | ||
280 | break; | ||
281 | case AMDGPU_HPD_6: | ||
282 | if (RREG32(DC_HPD6_INT_STATUS) & DC_HPDx_SENSE) | ||
283 | connected = true; | ||
284 | break; | ||
285 | default: | ||
286 | break; | ||
287 | } | ||
288 | 266 | ||
289 | return connected; | 267 | return connected; |
290 | } | 268 | } |
@@ -303,58 +281,15 @@ static void dce_v6_0_hpd_set_polarity(struct amdgpu_device *adev, | |||
303 | u32 tmp; | 281 | u32 tmp; |
304 | bool connected = dce_v6_0_hpd_sense(adev, hpd); | 282 | bool connected = dce_v6_0_hpd_sense(adev, hpd); |
305 | 283 | ||
306 | switch (hpd) { | 284 | if (hpd >= adev->mode_info.num_hpd) |
307 | case AMDGPU_HPD_1: | 285 | return; |
308 | tmp = RREG32(DC_HPD1_INT_CONTROL); | 286 | |
309 | if (connected) | 287 | tmp = RREG32(DC_HPD1_INT_CONTROL + hpd_offsets[hpd]); |
310 | tmp &= ~DC_HPDx_INT_POLARITY; | 288 | if (connected) |
311 | else | 289 | tmp &= ~DC_HPDx_INT_POLARITY; |
312 | tmp |= DC_HPDx_INT_POLARITY; | 290 | else |
313 | WREG32(DC_HPD1_INT_CONTROL, tmp); | 291 | tmp |= DC_HPDx_INT_POLARITY; |
314 | break; | 292 | WREG32(DC_HPD1_INT_CONTROL + hpd_offsets[hpd], tmp); |
315 | case AMDGPU_HPD_2: | ||
316 | tmp = RREG32(DC_HPD2_INT_CONTROL); | ||
317 | if (connected) | ||
318 | tmp &= ~DC_HPDx_INT_POLARITY; | ||
319 | else | ||
320 | tmp |= DC_HPDx_INT_POLARITY; | ||
321 | WREG32(DC_HPD2_INT_CONTROL, tmp); | ||
322 | break; | ||
323 | case AMDGPU_HPD_3: | ||
324 | tmp = RREG32(DC_HPD3_INT_CONTROL); | ||
325 | if (connected) | ||
326 | tmp &= ~DC_HPDx_INT_POLARITY; | ||
327 | else | ||
328 | tmp |= DC_HPDx_INT_POLARITY; | ||
329 | WREG32(DC_HPD3_INT_CONTROL, tmp); | ||
330 | break; | ||
331 | case AMDGPU_HPD_4: | ||
332 | tmp = RREG32(DC_HPD4_INT_CONTROL); | ||
333 | if (connected) | ||
334 | tmp &= ~DC_HPDx_INT_POLARITY; | ||
335 | else | ||
336 | tmp |= DC_HPDx_INT_POLARITY; | ||
337 | WREG32(DC_HPD4_INT_CONTROL, tmp); | ||
338 | break; | ||
339 | case AMDGPU_HPD_5: | ||
340 | tmp = RREG32(DC_HPD5_INT_CONTROL); | ||
341 | if (connected) | ||
342 | tmp &= ~DC_HPDx_INT_POLARITY; | ||
343 | else | ||
344 | tmp |= DC_HPDx_INT_POLARITY; | ||
345 | WREG32(DC_HPD5_INT_CONTROL, tmp); | ||
346 | break; | ||
347 | case AMDGPU_HPD_6: | ||
348 | tmp = RREG32(DC_HPD6_INT_CONTROL); | ||
349 | if (connected) | ||
350 | tmp &= ~DC_HPDx_INT_POLARITY; | ||
351 | else | ||
352 | tmp |= DC_HPDx_INT_POLARITY; | ||
353 | WREG32(DC_HPD6_INT_CONTROL, tmp); | ||
354 | break; | ||
355 | default: | ||
356 | break; | ||
357 | } | ||
358 | } | 293 | } |
359 | 294 | ||
360 | /** | 295 | /** |
@@ -369,34 +304,17 @@ static void dce_v6_0_hpd_init(struct amdgpu_device *adev) | |||
369 | { | 304 | { |
370 | struct drm_device *dev = adev->ddev; | 305 | struct drm_device *dev = adev->ddev; |
371 | struct drm_connector *connector; | 306 | struct drm_connector *connector; |
372 | u32 tmp = DC_HPDx_CONNECTION_TIMER(0x9c4) | | 307 | u32 tmp; |
373 | DC_HPDx_RX_INT_TIMER(0xfa) | DC_HPDx_EN; | ||
374 | 308 | ||
375 | list_for_each_entry(connector, &dev->mode_config.connector_list, head) { | 309 | list_for_each_entry(connector, &dev->mode_config.connector_list, head) { |
376 | struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector); | 310 | struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector); |
377 | 311 | ||
378 | switch (amdgpu_connector->hpd.hpd) { | 312 | if (amdgpu_connector->hpd.hpd >= adev->mode_info.num_hpd) |
379 | case AMDGPU_HPD_1: | 313 | continue; |
380 | WREG32(DC_HPD1_CONTROL, tmp); | 314 | |
381 | break; | 315 | tmp = RREG32(DC_HPD1_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd]); |
382 | case AMDGPU_HPD_2: | 316 | tmp |= DC_HPDx_EN; |
383 | WREG32(DC_HPD2_CONTROL, tmp); | 317 | WREG32(DC_HPD1_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd], tmp); |
384 | break; | ||
385 | case AMDGPU_HPD_3: | ||
386 | WREG32(DC_HPD3_CONTROL, tmp); | ||
387 | break; | ||
388 | case AMDGPU_HPD_4: | ||
389 | WREG32(DC_HPD4_CONTROL, tmp); | ||
390 | break; | ||
391 | case AMDGPU_HPD_5: | ||
392 | WREG32(DC_HPD5_CONTROL, tmp); | ||
393 | break; | ||
394 | case AMDGPU_HPD_6: | ||
395 | WREG32(DC_HPD6_CONTROL, tmp); | ||
396 | break; | ||
397 | default: | ||
398 | break; | ||
399 | } | ||
400 | 318 | ||
401 | if (connector->connector_type == DRM_MODE_CONNECTOR_eDP || | 319 | if (connector->connector_type == DRM_MODE_CONNECTOR_eDP || |
402 | connector->connector_type == DRM_MODE_CONNECTOR_LVDS) { | 320 | connector->connector_type == DRM_MODE_CONNECTOR_LVDS) { |
@@ -405,34 +323,9 @@ static void dce_v6_0_hpd_init(struct amdgpu_device *adev) | |||
405 | * https://bugzilla.redhat.com/show_bug.cgi?id=726143 | 323 | * https://bugzilla.redhat.com/show_bug.cgi?id=726143 |
406 | * also avoid interrupt storms during dpms. | 324 | * also avoid interrupt storms during dpms. |
407 | */ | 325 | */ |
408 | u32 dc_hpd_int_cntl_reg, dc_hpd_int_cntl; | 326 | tmp = RREG32(DC_HPD1_INT_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd]); |
409 | 327 | tmp &= ~DC_HPDx_INT_EN; | |
410 | switch (amdgpu_connector->hpd.hpd) { | 328 | WREG32(DC_HPD1_INT_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd], tmp); |
411 | case AMDGPU_HPD_1: | ||
412 | dc_hpd_int_cntl_reg = DC_HPD1_INT_CONTROL; | ||
413 | break; | ||
414 | case AMDGPU_HPD_2: | ||
415 | dc_hpd_int_cntl_reg = DC_HPD2_INT_CONTROL; | ||
416 | break; | ||
417 | case AMDGPU_HPD_3: | ||
418 | dc_hpd_int_cntl_reg = DC_HPD3_INT_CONTROL; | ||
419 | break; | ||
420 | case AMDGPU_HPD_4: | ||
421 | dc_hpd_int_cntl_reg = DC_HPD4_INT_CONTROL; | ||
422 | break; | ||
423 | case AMDGPU_HPD_5: | ||
424 | dc_hpd_int_cntl_reg = DC_HPD5_INT_CONTROL; | ||
425 | break; | ||
426 | case AMDGPU_HPD_6: | ||
427 | dc_hpd_int_cntl_reg = DC_HPD6_INT_CONTROL; | ||
428 | break; | ||
429 | default: | ||
430 | continue; | ||
431 | } | ||
432 | |||
433 | dc_hpd_int_cntl = RREG32(dc_hpd_int_cntl_reg); | ||
434 | dc_hpd_int_cntl &= ~DC_HPDx_INT_EN; | ||
435 | WREG32(dc_hpd_int_cntl_reg, dc_hpd_int_cntl); | ||
436 | continue; | 329 | continue; |
437 | } | 330 | } |
438 | 331 | ||
@@ -454,32 +347,18 @@ static void dce_v6_0_hpd_fini(struct amdgpu_device *adev) | |||
454 | { | 347 | { |
455 | struct drm_device *dev = adev->ddev; | 348 | struct drm_device *dev = adev->ddev; |
456 | struct drm_connector *connector; | 349 | struct drm_connector *connector; |
350 | u32 tmp; | ||
457 | 351 | ||
458 | list_for_each_entry(connector, &dev->mode_config.connector_list, head) { | 352 | list_for_each_entry(connector, &dev->mode_config.connector_list, head) { |
459 | struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector); | 353 | struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector); |
460 | 354 | ||
461 | switch (amdgpu_connector->hpd.hpd) { | 355 | if (amdgpu_connector->hpd.hpd >= adev->mode_info.num_hpd) |
462 | case AMDGPU_HPD_1: | 356 | continue; |
463 | WREG32(DC_HPD1_CONTROL, 0); | 357 | |
464 | break; | 358 | tmp = RREG32(DC_HPD1_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd]); |
465 | case AMDGPU_HPD_2: | 359 | tmp &= ~DC_HPDx_EN; |
466 | 	WREG32(DC_HPD2_CONTROL, 0); | 360 | 	WREG32(DC_HPD1_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd], tmp); |
467 | break; | 361 | |
468 | case AMDGPU_HPD_3: | ||
469 | WREG32(DC_HPD3_CONTROL, 0); | ||
470 | break; | ||
471 | case AMDGPU_HPD_4: | ||
472 | WREG32(DC_HPD4_CONTROL, 0); | ||
473 | break; | ||
474 | case AMDGPU_HPD_5: | ||
475 | WREG32(DC_HPD5_CONTROL, 0); | ||
476 | break; | ||
477 | case AMDGPU_HPD_6: | ||
478 | WREG32(DC_HPD6_CONTROL, 0); | ||
479 | break; | ||
480 | default: | ||
481 | break; | ||
482 | } | ||
483 | amdgpu_irq_put(adev, &adev->hpd_irq, amdgpu_connector->hpd.hpd); | 362 | amdgpu_irq_put(adev, &adev->hpd_irq, amdgpu_connector->hpd.hpd); |
484 | } | 363 | } |
485 | } | 364 | } |
@@ -611,12 +490,55 @@ static void dce_v6_0_resume_mc_access(struct amdgpu_device *adev, | |||
611 | static void dce_v6_0_set_vga_render_state(struct amdgpu_device *adev, | 490 | static void dce_v6_0_set_vga_render_state(struct amdgpu_device *adev, |
612 | bool render) | 491 | bool render) |
613 | { | 492 | { |
614 | if (!render) | 493 | if (!render) |
615 | WREG32(R_000300_VGA_RENDER_CONTROL, | 494 | WREG32(R_000300_VGA_RENDER_CONTROL, |
616 | RREG32(R_000300_VGA_RENDER_CONTROL) & C_000300_VGA_VSTATUS_CNTL); | 495 | RREG32(R_000300_VGA_RENDER_CONTROL) & C_000300_VGA_VSTATUS_CNTL); |
617 | 496 | ||
618 | } | 497 | } |
619 | 498 | ||
499 | static int dce_v6_0_get_num_crtc(struct amdgpu_device *adev) | ||
500 | { | ||
501 | int num_crtc = 0; | ||
502 | |||
503 | switch (adev->asic_type) { | ||
504 | case CHIP_TAHITI: | ||
505 | case CHIP_PITCAIRN: | ||
506 | case CHIP_VERDE: | ||
507 | num_crtc = 6; | ||
508 | break; | ||
509 | case CHIP_OLAND: | ||
510 | num_crtc = 2; | ||
511 | break; | ||
512 | default: | ||
513 | num_crtc = 0; | ||
514 | } | ||
515 | return num_crtc; | ||
516 | } | ||
517 | |||
518 | void dce_v6_0_disable_dce(struct amdgpu_device *adev) | ||
519 | { | ||
520 | 	/* Disable VGA render and any enabled CRTCs, if the ASIC has a DCE engine */ | ||
521 | if (amdgpu_atombios_has_dce_engine_info(adev)) { | ||
522 | u32 tmp; | ||
523 | int crtc_enabled, i; | ||
524 | |||
525 | dce_v6_0_set_vga_render_state(adev, false); | ||
526 | |||
527 | 		/* Disable CRTCs */ | ||
528 | for (i = 0; i < dce_v6_0_get_num_crtc(adev); i++) { | ||
529 | crtc_enabled = RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i]) & | ||
530 | EVERGREEN_CRTC_MASTER_EN; | ||
531 | if (crtc_enabled) { | ||
532 | WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 1); | ||
533 | tmp = RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i]); | ||
534 | tmp &= ~EVERGREEN_CRTC_MASTER_EN; | ||
535 | WREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i], tmp); | ||
536 | WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 0); | ||
537 | } | ||
538 | } | ||
539 | } | ||
540 | } | ||
541 | |||
620 | static void dce_v6_0_program_fmt(struct drm_encoder *encoder) | 542 | static void dce_v6_0_program_fmt(struct drm_encoder *encoder) |
621 | { | 543 | { |
622 | 544 | ||
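
dce_v6_0_disable_dce() follows the standard takeover sequence: stop VGA render, then for each enabled CRTC take the update lock, clear MASTER_EN, and unlock. The header change below exports it, presumably so a non-DCE display path can quiesce the hardware first; a hedged usage sketch (the calling site is an assumption):

	if (adev->enable_virtual_display)
		dce_v6_0_disable_dce(adev);	/* quiesce real DCE before takeover */
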
@@ -2338,21 +2260,20 @@ static int dce_v6_0_early_init(void *handle) | |||
2338 | dce_v6_0_set_display_funcs(adev); | 2260 | dce_v6_0_set_display_funcs(adev); |
2339 | dce_v6_0_set_irq_funcs(adev); | 2261 | dce_v6_0_set_irq_funcs(adev); |
2340 | 2262 | ||
2263 | adev->mode_info.num_crtc = dce_v6_0_get_num_crtc(adev); | ||
2264 | |||
2341 | switch (adev->asic_type) { | 2265 | switch (adev->asic_type) { |
2342 | case CHIP_TAHITI: | 2266 | case CHIP_TAHITI: |
2343 | case CHIP_PITCAIRN: | 2267 | case CHIP_PITCAIRN: |
2344 | case CHIP_VERDE: | 2268 | case CHIP_VERDE: |
2345 | adev->mode_info.num_crtc = 6; | ||
2346 | adev->mode_info.num_hpd = 6; | 2269 | adev->mode_info.num_hpd = 6; |
2347 | adev->mode_info.num_dig = 6; | 2270 | adev->mode_info.num_dig = 6; |
2348 | break; | 2271 | break; |
2349 | case CHIP_OLAND: | 2272 | case CHIP_OLAND: |
2350 | adev->mode_info.num_crtc = 2; | ||
2351 | adev->mode_info.num_hpd = 2; | 2273 | adev->mode_info.num_hpd = 2; |
2352 | adev->mode_info.num_dig = 2; | 2274 | adev->mode_info.num_dig = 2; |
2353 | break; | 2275 | break; |
2354 | default: | 2276 | default: |
2355 | /* FIXME: not supported yet */ | ||
2356 | return -EINVAL; | 2277 | return -EINVAL; |
2357 | } | 2278 | } |
2358 | 2279 | ||
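
Hoisting the CRTC count into dce_v6_0_get_num_crtc() keeps early_init and disable_dce from drifting apart, and num_hpd set here is exactly the bound enforced by the new guards above. On Oland, for instance, only two HPD pins exist, so indices 2 through 5 are rejected (illustration only; hpd_sense is static to this file):

	adev->mode_info.num_hpd = 2;			/* CHIP_OLAND */
	dce_v6_0_hpd_sense(adev, AMDGPU_HPD_5);	/* false: 4 >= num_hpd */
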
@@ -2588,42 +2509,23 @@ static int dce_v6_0_set_hpd_interrupt_state(struct amdgpu_device *adev, | |||
2588 | unsigned type, | 2509 | unsigned type, |
2589 | enum amdgpu_interrupt_state state) | 2510 | enum amdgpu_interrupt_state state) |
2590 | { | 2511 | { |
2591 | u32 dc_hpd_int_cntl_reg, dc_hpd_int_cntl; | 2512 | u32 dc_hpd_int_cntl; |
2592 | 2513 | ||
2593 | switch (type) { | 2514 | if (type >= adev->mode_info.num_hpd) { |
2594 | case AMDGPU_HPD_1: | ||
2595 | dc_hpd_int_cntl_reg = DC_HPD1_INT_CONTROL; | ||
2596 | break; | ||
2597 | case AMDGPU_HPD_2: | ||
2598 | dc_hpd_int_cntl_reg = DC_HPD2_INT_CONTROL; | ||
2599 | break; | ||
2600 | case AMDGPU_HPD_3: | ||
2601 | dc_hpd_int_cntl_reg = DC_HPD3_INT_CONTROL; | ||
2602 | break; | ||
2603 | case AMDGPU_HPD_4: | ||
2604 | dc_hpd_int_cntl_reg = DC_HPD4_INT_CONTROL; | ||
2605 | break; | ||
2606 | case AMDGPU_HPD_5: | ||
2607 | dc_hpd_int_cntl_reg = DC_HPD5_INT_CONTROL; | ||
2608 | break; | ||
2609 | case AMDGPU_HPD_6: | ||
2610 | dc_hpd_int_cntl_reg = DC_HPD6_INT_CONTROL; | ||
2611 | break; | ||
2612 | default: | ||
2613 | 		DRM_DEBUG("invalid hpd %d\n", type); | 2515 | 		DRM_DEBUG("invalid hpd %d\n", type); |
2614 | return 0; | 2516 | return 0; |
2615 | } | 2517 | } |
2616 | 2518 | ||
2617 | switch (state) { | 2519 | switch (state) { |
2618 | case AMDGPU_IRQ_STATE_DISABLE: | 2520 | case AMDGPU_IRQ_STATE_DISABLE: |
2619 | dc_hpd_int_cntl = RREG32(dc_hpd_int_cntl_reg); | 2521 | dc_hpd_int_cntl = RREG32(DC_HPD1_INT_CONTROL + hpd_offsets[type]); |
2620 | dc_hpd_int_cntl &= ~(DC_HPDx_INT_EN | DC_HPDx_RX_INT_EN); | 2522 | dc_hpd_int_cntl &= ~DC_HPDx_INT_EN; |
2621 | WREG32(dc_hpd_int_cntl_reg, dc_hpd_int_cntl); | 2523 | WREG32(DC_HPD1_INT_CONTROL + hpd_offsets[type], dc_hpd_int_cntl); |
2622 | break; | 2524 | break; |
2623 | case AMDGPU_IRQ_STATE_ENABLE: | 2525 | case AMDGPU_IRQ_STATE_ENABLE: |
2624 | dc_hpd_int_cntl = RREG32(dc_hpd_int_cntl_reg); | 2526 | dc_hpd_int_cntl = RREG32(DC_HPD1_INT_CONTROL + hpd_offsets[type]); |
2625 | dc_hpd_int_cntl |= (DC_HPDx_INT_EN | DC_HPDx_RX_INT_EN); | 2527 | dc_hpd_int_cntl |= DC_HPDx_INT_EN; |
2626 | WREG32(dc_hpd_int_cntl_reg, dc_hpd_int_cntl); | 2528 | WREG32(DC_HPD1_INT_CONTROL + hpd_offsets[type], dc_hpd_int_cntl); |
2627 | break; | 2529 | break; |
2628 | default: | 2530 | default: |
2629 | break; | 2531 | break; |
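
Note the behavioral narrowing hiding in this hunk: the old code toggled both the sense and RX interrupt enables, while the new code touches only DC_HPDx_INT_EN. Side by side:

	dc_hpd_int_cntl &= ~(DC_HPDx_INT_EN | DC_HPDx_RX_INT_EN);	/* before */
	dc_hpd_int_cntl &= ~DC_HPDx_INT_EN;				/* after */
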
@@ -2796,7 +2698,7 @@ static int dce_v6_0_hpd_irq(struct amdgpu_device *adev, | |||
2796 | struct amdgpu_irq_src *source, | 2698 | struct amdgpu_irq_src *source, |
2797 | struct amdgpu_iv_entry *entry) | 2699 | struct amdgpu_iv_entry *entry) |
2798 | { | 2700 | { |
2799 | uint32_t disp_int, mask, int_control, tmp; | 2701 | uint32_t disp_int, mask, tmp; |
2800 | unsigned hpd; | 2702 | unsigned hpd; |
2801 | 2703 | ||
2802 | if (entry->src_data >= adev->mode_info.num_hpd) { | 2704 | if (entry->src_data >= adev->mode_info.num_hpd) { |
@@ -2807,12 +2709,11 @@ static int dce_v6_0_hpd_irq(struct amdgpu_device *adev, | |||
2807 | hpd = entry->src_data; | 2709 | hpd = entry->src_data; |
2808 | disp_int = RREG32(interrupt_status_offsets[hpd].reg); | 2710 | disp_int = RREG32(interrupt_status_offsets[hpd].reg); |
2809 | mask = interrupt_status_offsets[hpd].hpd; | 2711 | mask = interrupt_status_offsets[hpd].hpd; |
2810 | int_control = hpd_int_control_offsets[hpd]; | ||
2811 | 2712 | ||
2812 | if (disp_int & mask) { | 2713 | if (disp_int & mask) { |
2813 | tmp = RREG32(int_control); | 2714 | tmp = RREG32(DC_HPD1_INT_CONTROL + hpd_offsets[hpd]); |
2814 | tmp |= DC_HPD1_INT_CONTROL__DC_HPD1_INT_ACK_MASK; | 2715 | tmp |= DC_HPD1_INT_CONTROL__DC_HPD1_INT_ACK_MASK; |
2815 | WREG32(int_control, tmp); | 2716 | WREG32(DC_HPD1_INT_CONTROL + hpd_offsets[hpd], tmp); |
2816 | schedule_work(&adev->hotplug_work); | 2717 | schedule_work(&adev->hotplug_work); |
2817 | DRM_INFO("IH: HPD%d\n", hpd + 1); | 2718 | DRM_INFO("IH: HPD%d\n", hpd + 1); |
2818 | } | 2719 | } |
@@ -2833,7 +2734,7 @@ static int dce_v6_0_set_powergating_state(void *handle, | |||
2833 | return 0; | 2734 | return 0; |
2834 | } | 2735 | } |
2835 | 2736 | ||
2836 | const struct amd_ip_funcs dce_v6_0_ip_funcs = { | 2737 | static const struct amd_ip_funcs dce_v6_0_ip_funcs = { |
2837 | .name = "dce_v6_0", | 2738 | .name = "dce_v6_0", |
2838 | .early_init = dce_v6_0_early_init, | 2739 | .early_init = dce_v6_0_early_init, |
2839 | .late_init = NULL, | 2740 | .late_init = NULL, |
@@ -3174,3 +3075,21 @@ static void dce_v6_0_set_irq_funcs(struct amdgpu_device *adev) | |||
3174 | adev->hpd_irq.num_types = AMDGPU_HPD_LAST; | 3075 | adev->hpd_irq.num_types = AMDGPU_HPD_LAST; |
3175 | adev->hpd_irq.funcs = &dce_v6_0_hpd_irq_funcs; | 3076 | adev->hpd_irq.funcs = &dce_v6_0_hpd_irq_funcs; |
3176 | } | 3077 | } |
3078 | |||
3079 | const struct amdgpu_ip_block_version dce_v6_0_ip_block = | ||
3080 | { | ||
3081 | .type = AMD_IP_BLOCK_TYPE_DCE, | ||
3082 | .major = 6, | ||
3083 | .minor = 0, | ||
3084 | .rev = 0, | ||
3085 | .funcs = &dce_v6_0_ip_funcs, | ||
3086 | }; | ||
3087 | |||
3088 | const struct amdgpu_ip_block_version dce_v6_4_ip_block = | ||
3089 | { | ||
3090 | .type = AMD_IP_BLOCK_TYPE_DCE, | ||
3091 | .major = 6, | ||
3092 | .minor = 4, | ||
3093 | .rev = 0, | ||
3094 | .funcs = &dce_v6_0_ip_funcs, | ||
3095 | }; | ||
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.h b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.h index 6a5528105bb6..7b546b596de1 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.h +++ b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.h | |||
@@ -24,6 +24,9 @@ | |||
24 | #ifndef __DCE_V6_0_H__ | 24 | #ifndef __DCE_V6_0_H__ |
25 | #define __DCE_V6_0_H__ | 25 | #define __DCE_V6_0_H__ |
26 | 26 | ||
27 | extern const struct amd_ip_funcs dce_v6_0_ip_funcs; | 27 | extern const struct amdgpu_ip_block_version dce_v6_0_ip_block; |
28 | extern const struct amdgpu_ip_block_version dce_v6_4_ip_block; | ||
29 | |||
30 | void dce_v6_0_disable_dce(struct amdgpu_device *adev); | ||
28 | 31 | ||
29 | #endif | 32 | #endif |
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c index 5966166ec94c..979aedf4b74d 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c | |||
@@ -31,6 +31,7 @@ | |||
31 | #include "atombios_encoders.h" | 31 | #include "atombios_encoders.h" |
32 | #include "amdgpu_pll.h" | 32 | #include "amdgpu_pll.h" |
33 | #include "amdgpu_connectors.h" | 33 | #include "amdgpu_connectors.h" |
34 | #include "dce_v8_0.h" | ||
34 | 35 | ||
35 | #include "dce/dce_8_0_d.h" | 36 | #include "dce/dce_8_0_d.h" |
36 | #include "dce/dce_8_0_sh_mask.h" | 37 | #include "dce/dce_8_0_sh_mask.h" |
@@ -56,6 +57,16 @@ static const u32 crtc_offsets[6] = | |||
56 | CRTC5_REGISTER_OFFSET | 57 | CRTC5_REGISTER_OFFSET |
57 | }; | 58 | }; |
58 | 59 | ||
60 | static const u32 hpd_offsets[] = | ||
61 | { | ||
62 | HPD0_REGISTER_OFFSET, | ||
63 | HPD1_REGISTER_OFFSET, | ||
64 | HPD2_REGISTER_OFFSET, | ||
65 | HPD3_REGISTER_OFFSET, | ||
66 | HPD4_REGISTER_OFFSET, | ||
67 | HPD5_REGISTER_OFFSET | ||
68 | }; | ||
69 | |||
59 | static const uint32_t dig_offsets[] = { | 70 | static const uint32_t dig_offsets[] = { |
60 | CRTC0_REGISTER_OFFSET, | 71 | CRTC0_REGISTER_OFFSET, |
61 | CRTC1_REGISTER_OFFSET, | 72 | CRTC1_REGISTER_OFFSET, |
@@ -104,15 +115,6 @@ static const struct { | |||
104 | .hpd = DISP_INTERRUPT_STATUS_CONTINUE5__DC_HPD6_INTERRUPT_MASK | 115 | .hpd = DISP_INTERRUPT_STATUS_CONTINUE5__DC_HPD6_INTERRUPT_MASK |
105 | } }; | 116 | } }; |
106 | 117 | ||
107 | static const uint32_t hpd_int_control_offsets[6] = { | ||
108 | mmDC_HPD1_INT_CONTROL, | ||
109 | mmDC_HPD2_INT_CONTROL, | ||
110 | mmDC_HPD3_INT_CONTROL, | ||
111 | mmDC_HPD4_INT_CONTROL, | ||
112 | mmDC_HPD5_INT_CONTROL, | ||
113 | mmDC_HPD6_INT_CONTROL, | ||
114 | }; | ||
115 | |||
116 | static u32 dce_v8_0_audio_endpt_rreg(struct amdgpu_device *adev, | 118 | static u32 dce_v8_0_audio_endpt_rreg(struct amdgpu_device *adev, |
117 | u32 block_offset, u32 reg) | 119 | u32 block_offset, u32 reg) |
118 | { | 120 | { |
@@ -278,34 +280,12 @@ static bool dce_v8_0_hpd_sense(struct amdgpu_device *adev, | |||
278 | { | 280 | { |
279 | bool connected = false; | 281 | bool connected = false; |
280 | 282 | ||
281 | switch (hpd) { | 283 | if (hpd >= adev->mode_info.num_hpd) |
282 | case AMDGPU_HPD_1: | 284 | return connected; |
283 | if (RREG32(mmDC_HPD1_INT_STATUS) & DC_HPD1_INT_STATUS__DC_HPD1_SENSE_MASK) | 285 | |
284 | connected = true; | 286 | if (RREG32(mmDC_HPD1_INT_STATUS + hpd_offsets[hpd]) & |
285 | break; | 287 | DC_HPD1_INT_STATUS__DC_HPD1_SENSE_MASK) |
286 | case AMDGPU_HPD_2: | 288 | connected = true; |
287 | if (RREG32(mmDC_HPD2_INT_STATUS) & DC_HPD2_INT_STATUS__DC_HPD2_SENSE_MASK) | ||
288 | connected = true; | ||
289 | break; | ||
290 | case AMDGPU_HPD_3: | ||
291 | if (RREG32(mmDC_HPD3_INT_STATUS) & DC_HPD3_INT_STATUS__DC_HPD3_SENSE_MASK) | ||
292 | connected = true; | ||
293 | break; | ||
294 | case AMDGPU_HPD_4: | ||
295 | if (RREG32(mmDC_HPD4_INT_STATUS) & DC_HPD4_INT_STATUS__DC_HPD4_SENSE_MASK) | ||
296 | connected = true; | ||
297 | break; | ||
298 | case AMDGPU_HPD_5: | ||
299 | if (RREG32(mmDC_HPD5_INT_STATUS) & DC_HPD5_INT_STATUS__DC_HPD5_SENSE_MASK) | ||
300 | connected = true; | ||
301 | break; | ||
302 | case AMDGPU_HPD_6: | ||
303 | if (RREG32(mmDC_HPD6_INT_STATUS) & DC_HPD6_INT_STATUS__DC_HPD6_SENSE_MASK) | ||
304 | connected = true; | ||
305 | break; | ||
306 | default: | ||
307 | break; | ||
308 | } | ||
309 | 289 | ||
310 | return connected; | 290 | return connected; |
311 | } | 291 | } |
@@ -324,58 +304,15 @@ static void dce_v8_0_hpd_set_polarity(struct amdgpu_device *adev, | |||
324 | u32 tmp; | 304 | u32 tmp; |
325 | bool connected = dce_v8_0_hpd_sense(adev, hpd); | 305 | bool connected = dce_v8_0_hpd_sense(adev, hpd); |
326 | 306 | ||
327 | switch (hpd) { | 307 | if (hpd >= adev->mode_info.num_hpd) |
328 | case AMDGPU_HPD_1: | 308 | return; |
329 | tmp = RREG32(mmDC_HPD1_INT_CONTROL); | 309 | |
330 | if (connected) | 310 | tmp = RREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[hpd]); |
331 | tmp &= ~DC_HPD1_INT_CONTROL__DC_HPD1_INT_POLARITY_MASK; | 311 | if (connected) |
332 | else | 312 | tmp &= ~DC_HPD1_INT_CONTROL__DC_HPD1_INT_POLARITY_MASK; |
333 | tmp |= DC_HPD1_INT_CONTROL__DC_HPD1_INT_POLARITY_MASK; | 313 | else |
334 | WREG32(mmDC_HPD1_INT_CONTROL, tmp); | 314 | tmp |= DC_HPD1_INT_CONTROL__DC_HPD1_INT_POLARITY_MASK; |
335 | break; | 315 | WREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[hpd], tmp); |
336 | case AMDGPU_HPD_2: | ||
337 | tmp = RREG32(mmDC_HPD2_INT_CONTROL); | ||
338 | if (connected) | ||
339 | tmp &= ~DC_HPD2_INT_CONTROL__DC_HPD2_INT_POLARITY_MASK; | ||
340 | else | ||
341 | tmp |= DC_HPD2_INT_CONTROL__DC_HPD2_INT_POLARITY_MASK; | ||
342 | WREG32(mmDC_HPD2_INT_CONTROL, tmp); | ||
343 | break; | ||
344 | case AMDGPU_HPD_3: | ||
345 | tmp = RREG32(mmDC_HPD3_INT_CONTROL); | ||
346 | if (connected) | ||
347 | tmp &= ~DC_HPD3_INT_CONTROL__DC_HPD3_INT_POLARITY_MASK; | ||
348 | else | ||
349 | tmp |= DC_HPD3_INT_CONTROL__DC_HPD3_INT_POLARITY_MASK; | ||
350 | WREG32(mmDC_HPD3_INT_CONTROL, tmp); | ||
351 | break; | ||
352 | case AMDGPU_HPD_4: | ||
353 | tmp = RREG32(mmDC_HPD4_INT_CONTROL); | ||
354 | if (connected) | ||
355 | tmp &= ~DC_HPD4_INT_CONTROL__DC_HPD4_INT_POLARITY_MASK; | ||
356 | else | ||
357 | tmp |= DC_HPD4_INT_CONTROL__DC_HPD4_INT_POLARITY_MASK; | ||
358 | WREG32(mmDC_HPD4_INT_CONTROL, tmp); | ||
359 | break; | ||
360 | case AMDGPU_HPD_5: | ||
361 | tmp = RREG32(mmDC_HPD5_INT_CONTROL); | ||
362 | if (connected) | ||
363 | tmp &= ~DC_HPD5_INT_CONTROL__DC_HPD5_INT_POLARITY_MASK; | ||
364 | else | ||
365 | tmp |= DC_HPD5_INT_CONTROL__DC_HPD5_INT_POLARITY_MASK; | ||
366 | WREG32(mmDC_HPD5_INT_CONTROL, tmp); | ||
367 | break; | ||
368 | case AMDGPU_HPD_6: | ||
369 | tmp = RREG32(mmDC_HPD6_INT_CONTROL); | ||
370 | if (connected) | ||
371 | tmp &= ~DC_HPD6_INT_CONTROL__DC_HPD6_INT_POLARITY_MASK; | ||
372 | else | ||
373 | tmp |= DC_HPD6_INT_CONTROL__DC_HPD6_INT_POLARITY_MASK; | ||
374 | WREG32(mmDC_HPD6_INT_CONTROL, tmp); | ||
375 | break; | ||
376 | default: | ||
377 | break; | ||
378 | } | ||
379 | } | 316 | } |
380 | 317 | ||
381 | /** | 318 | /** |
@@ -390,35 +327,17 @@ static void dce_v8_0_hpd_init(struct amdgpu_device *adev) | |||
390 | { | 327 | { |
391 | struct drm_device *dev = adev->ddev; | 328 | struct drm_device *dev = adev->ddev; |
392 | struct drm_connector *connector; | 329 | struct drm_connector *connector; |
393 | u32 tmp = (0x9c4 << DC_HPD1_CONTROL__DC_HPD1_CONNECTION_TIMER__SHIFT) | | 330 | u32 tmp; |
394 | (0xfa << DC_HPD1_CONTROL__DC_HPD1_RX_INT_TIMER__SHIFT) | | ||
395 | DC_HPD1_CONTROL__DC_HPD1_EN_MASK; | ||
396 | 331 | ||
397 | list_for_each_entry(connector, &dev->mode_config.connector_list, head) { | 332 | list_for_each_entry(connector, &dev->mode_config.connector_list, head) { |
398 | struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector); | 333 | struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector); |
399 | 334 | ||
400 | switch (amdgpu_connector->hpd.hpd) { | 335 | if (amdgpu_connector->hpd.hpd >= adev->mode_info.num_hpd) |
401 | case AMDGPU_HPD_1: | 336 | continue; |
402 | WREG32(mmDC_HPD1_CONTROL, tmp); | 337 | |
403 | break; | 338 | tmp = RREG32(mmDC_HPD1_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd]); |
404 | case AMDGPU_HPD_2: | 339 | tmp |= DC_HPD1_CONTROL__DC_HPD1_EN_MASK; |
405 | WREG32(mmDC_HPD2_CONTROL, tmp); | 340 | WREG32(mmDC_HPD1_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd], tmp); |
406 | break; | ||
407 | case AMDGPU_HPD_3: | ||
408 | WREG32(mmDC_HPD3_CONTROL, tmp); | ||
409 | break; | ||
410 | case AMDGPU_HPD_4: | ||
411 | WREG32(mmDC_HPD4_CONTROL, tmp); | ||
412 | break; | ||
413 | case AMDGPU_HPD_5: | ||
414 | WREG32(mmDC_HPD5_CONTROL, tmp); | ||
415 | break; | ||
416 | case AMDGPU_HPD_6: | ||
417 | WREG32(mmDC_HPD6_CONTROL, tmp); | ||
418 | break; | ||
419 | default: | ||
420 | break; | ||
421 | } | ||
422 | 341 | ||
423 | if (connector->connector_type == DRM_MODE_CONNECTOR_eDP || | 342 | if (connector->connector_type == DRM_MODE_CONNECTOR_eDP || |
424 | connector->connector_type == DRM_MODE_CONNECTOR_LVDS) { | 343 | connector->connector_type == DRM_MODE_CONNECTOR_LVDS) { |
@@ -427,34 +346,9 @@ static void dce_v8_0_hpd_init(struct amdgpu_device *adev) | |||
427 | * https://bugzilla.redhat.com/show_bug.cgi?id=726143 | 346 | * https://bugzilla.redhat.com/show_bug.cgi?id=726143 |
428 | * also avoid interrupt storms during dpms. | 347 | * also avoid interrupt storms during dpms. |
429 | */ | 348 | */ |
430 | u32 dc_hpd_int_cntl_reg, dc_hpd_int_cntl; | 349 | tmp = RREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd]); |
431 | 350 | tmp &= ~DC_HPD1_INT_CONTROL__DC_HPD1_INT_EN_MASK; | |
432 | switch (amdgpu_connector->hpd.hpd) { | 351 | WREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd], tmp); |
433 | case AMDGPU_HPD_1: | ||
434 | dc_hpd_int_cntl_reg = mmDC_HPD1_INT_CONTROL; | ||
435 | break; | ||
436 | case AMDGPU_HPD_2: | ||
437 | dc_hpd_int_cntl_reg = mmDC_HPD2_INT_CONTROL; | ||
438 | break; | ||
439 | case AMDGPU_HPD_3: | ||
440 | dc_hpd_int_cntl_reg = mmDC_HPD3_INT_CONTROL; | ||
441 | break; | ||
442 | case AMDGPU_HPD_4: | ||
443 | dc_hpd_int_cntl_reg = mmDC_HPD4_INT_CONTROL; | ||
444 | break; | ||
445 | case AMDGPU_HPD_5: | ||
446 | dc_hpd_int_cntl_reg = mmDC_HPD5_INT_CONTROL; | ||
447 | break; | ||
448 | case AMDGPU_HPD_6: | ||
449 | dc_hpd_int_cntl_reg = mmDC_HPD6_INT_CONTROL; | ||
450 | break; | ||
451 | default: | ||
452 | continue; | ||
453 | } | ||
454 | |||
455 | dc_hpd_int_cntl = RREG32(dc_hpd_int_cntl_reg); | ||
456 | dc_hpd_int_cntl &= ~DC_HPD1_INT_CONTROL__DC_HPD1_INT_EN_MASK; | ||
457 | WREG32(dc_hpd_int_cntl_reg, dc_hpd_int_cntl); | ||
458 | continue; | 352 | continue; |
459 | } | 353 | } |
460 | 354 | ||
@@ -475,32 +369,18 @@ static void dce_v8_0_hpd_fini(struct amdgpu_device *adev) | |||
475 | { | 369 | { |
476 | struct drm_device *dev = adev->ddev; | 370 | struct drm_device *dev = adev->ddev; |
477 | struct drm_connector *connector; | 371 | struct drm_connector *connector; |
372 | u32 tmp; | ||
478 | 373 | ||
479 | list_for_each_entry(connector, &dev->mode_config.connector_list, head) { | 374 | list_for_each_entry(connector, &dev->mode_config.connector_list, head) { |
480 | struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector); | 375 | struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector); |
481 | 376 | ||
482 | switch (amdgpu_connector->hpd.hpd) { | 377 | if (amdgpu_connector->hpd.hpd >= adev->mode_info.num_hpd) |
483 | case AMDGPU_HPD_1: | 378 | continue; |
484 | WREG32(mmDC_HPD1_CONTROL, 0); | 379 | |
485 | break; | 380 | tmp = RREG32(mmDC_HPD1_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd]); |
486 | case AMDGPU_HPD_2: | 381 | tmp &= ~DC_HPD1_CONTROL__DC_HPD1_EN_MASK; |
487 | 		WREG32(mmDC_HPD2_CONTROL, 0); | 382 | 		WREG32(mmDC_HPD1_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd], tmp); |
488 | break; | 383 | |
489 | case AMDGPU_HPD_3: | ||
490 | WREG32(mmDC_HPD3_CONTROL, 0); | ||
491 | break; | ||
492 | case AMDGPU_HPD_4: | ||
493 | WREG32(mmDC_HPD4_CONTROL, 0); | ||
494 | break; | ||
495 | case AMDGPU_HPD_5: | ||
496 | WREG32(mmDC_HPD5_CONTROL, 0); | ||
497 | break; | ||
498 | case AMDGPU_HPD_6: | ||
499 | WREG32(mmDC_HPD6_CONTROL, 0); | ||
500 | break; | ||
501 | default: | ||
502 | break; | ||
503 | } | ||
504 | amdgpu_irq_put(adev, &adev->hpd_irq, amdgpu_connector->hpd.hpd); | 384 | amdgpu_irq_put(adev, &adev->hpd_irq, amdgpu_connector->hpd.hpd); |
505 | } | 385 | } |
506 | } | 386 | } |
@@ -3204,42 +3084,23 @@ static int dce_v8_0_set_hpd_interrupt_state(struct amdgpu_device *adev, | |||
3204 | unsigned type, | 3084 | unsigned type, |
3205 | enum amdgpu_interrupt_state state) | 3085 | enum amdgpu_interrupt_state state) |
3206 | { | 3086 | { |
3207 | u32 dc_hpd_int_cntl_reg, dc_hpd_int_cntl; | 3087 | u32 dc_hpd_int_cntl; |
3208 | 3088 | ||
3209 | switch (type) { | 3089 | if (type >= adev->mode_info.num_hpd) { |
3210 | case AMDGPU_HPD_1: | ||
3211 | dc_hpd_int_cntl_reg = mmDC_HPD1_INT_CONTROL; | ||
3212 | break; | ||
3213 | case AMDGPU_HPD_2: | ||
3214 | dc_hpd_int_cntl_reg = mmDC_HPD2_INT_CONTROL; | ||
3215 | break; | ||
3216 | case AMDGPU_HPD_3: | ||
3217 | dc_hpd_int_cntl_reg = mmDC_HPD3_INT_CONTROL; | ||
3218 | break; | ||
3219 | case AMDGPU_HPD_4: | ||
3220 | dc_hpd_int_cntl_reg = mmDC_HPD4_INT_CONTROL; | ||
3221 | break; | ||
3222 | case AMDGPU_HPD_5: | ||
3223 | dc_hpd_int_cntl_reg = mmDC_HPD5_INT_CONTROL; | ||
3224 | break; | ||
3225 | case AMDGPU_HPD_6: | ||
3226 | dc_hpd_int_cntl_reg = mmDC_HPD6_INT_CONTROL; | ||
3227 | break; | ||
3228 | default: | ||
3229 | DRM_DEBUG("invalid hdp %d\n", type); | 3090 | DRM_DEBUG("invalid hdp %d\n", type); |
3230 | return 0; | 3091 | return 0; |
3231 | } | 3092 | } |
3232 | 3093 | ||
3233 | switch (state) { | 3094 | switch (state) { |
3234 | case AMDGPU_IRQ_STATE_DISABLE: | 3095 | case AMDGPU_IRQ_STATE_DISABLE: |
3235 | dc_hpd_int_cntl = RREG32(dc_hpd_int_cntl_reg); | 3096 | dc_hpd_int_cntl = RREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[type]); |
3236 | dc_hpd_int_cntl &= ~DC_HPD1_INT_CONTROL__DC_HPD1_INT_EN_MASK; | 3097 | dc_hpd_int_cntl &= ~DC_HPD1_INT_CONTROL__DC_HPD1_INT_EN_MASK; |
3237 | WREG32(dc_hpd_int_cntl_reg, dc_hpd_int_cntl); | 3098 | WREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[type], dc_hpd_int_cntl); |
3238 | break; | 3099 | break; |
3239 | case AMDGPU_IRQ_STATE_ENABLE: | 3100 | case AMDGPU_IRQ_STATE_ENABLE: |
3240 | dc_hpd_int_cntl = RREG32(dc_hpd_int_cntl_reg); | 3101 | dc_hpd_int_cntl = RREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[type]); |
3241 | dc_hpd_int_cntl |= DC_HPD1_INT_CONTROL__DC_HPD1_INT_EN_MASK; | 3102 | dc_hpd_int_cntl |= DC_HPD1_INT_CONTROL__DC_HPD1_INT_EN_MASK; |
3242 | WREG32(dc_hpd_int_cntl_reg, dc_hpd_int_cntl); | 3103 | WREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[type], dc_hpd_int_cntl); |
3243 | break; | 3104 | break; |
3244 | default: | 3105 | default: |
3245 | break; | 3106 | break; |
@@ -3412,7 +3273,7 @@ static int dce_v8_0_hpd_irq(struct amdgpu_device *adev, | |||
3412 | struct amdgpu_irq_src *source, | 3273 | struct amdgpu_irq_src *source, |
3413 | struct amdgpu_iv_entry *entry) | 3274 | struct amdgpu_iv_entry *entry) |
3414 | { | 3275 | { |
3415 | uint32_t disp_int, mask, int_control, tmp; | 3276 | uint32_t disp_int, mask, tmp; |
3416 | unsigned hpd; | 3277 | unsigned hpd; |
3417 | 3278 | ||
3418 | if (entry->src_data >= adev->mode_info.num_hpd) { | 3279 | if (entry->src_data >= adev->mode_info.num_hpd) { |
@@ -3423,12 +3284,11 @@ static int dce_v8_0_hpd_irq(struct amdgpu_device *adev, | |||
3423 | hpd = entry->src_data; | 3284 | hpd = entry->src_data; |
3424 | disp_int = RREG32(interrupt_status_offsets[hpd].reg); | 3285 | disp_int = RREG32(interrupt_status_offsets[hpd].reg); |
3425 | mask = interrupt_status_offsets[hpd].hpd; | 3286 | mask = interrupt_status_offsets[hpd].hpd; |
3426 | int_control = hpd_int_control_offsets[hpd]; | ||
3427 | 3287 | ||
3428 | if (disp_int & mask) { | 3288 | if (disp_int & mask) { |
3429 | tmp = RREG32(int_control); | 3289 | tmp = RREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[hpd]); |
3430 | tmp |= DC_HPD1_INT_CONTROL__DC_HPD1_INT_ACK_MASK; | 3290 | tmp |= DC_HPD1_INT_CONTROL__DC_HPD1_INT_ACK_MASK; |
3431 | WREG32(int_control, tmp); | 3291 | WREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[hpd], tmp); |
3432 | schedule_work(&adev->hotplug_work); | 3292 | schedule_work(&adev->hotplug_work); |
3433 | DRM_DEBUG("IH: HPD%d\n", hpd + 1); | 3293 | DRM_DEBUG("IH: HPD%d\n", hpd + 1); |
3434 | } | 3294 | } |
@@ -3449,7 +3309,7 @@ static int dce_v8_0_set_powergating_state(void *handle, | |||
3449 | return 0; | 3309 | return 0; |
3450 | } | 3310 | } |
3451 | 3311 | ||
3452 | const struct amd_ip_funcs dce_v8_0_ip_funcs = { | 3312 | static const struct amd_ip_funcs dce_v8_0_ip_funcs = { |
3453 | .name = "dce_v8_0", | 3313 | .name = "dce_v8_0", |
3454 | .early_init = dce_v8_0_early_init, | 3314 | .early_init = dce_v8_0_early_init, |
3455 | .late_init = NULL, | 3315 | .late_init = NULL, |
@@ -3779,3 +3639,48 @@ static void dce_v8_0_set_irq_funcs(struct amdgpu_device *adev) | |||
3779 | adev->hpd_irq.num_types = AMDGPU_HPD_LAST; | 3639 | adev->hpd_irq.num_types = AMDGPU_HPD_LAST; |
3780 | adev->hpd_irq.funcs = &dce_v8_0_hpd_irq_funcs; | 3640 | adev->hpd_irq.funcs = &dce_v8_0_hpd_irq_funcs; |
3781 | } | 3641 | } |
3642 | |||
3643 | const struct amdgpu_ip_block_version dce_v8_0_ip_block = | ||
3644 | { | ||
3645 | .type = AMD_IP_BLOCK_TYPE_DCE, | ||
3646 | .major = 8, | ||
3647 | .minor = 0, | ||
3648 | .rev = 0, | ||
3649 | .funcs = &dce_v8_0_ip_funcs, | ||
3650 | }; | ||
3651 | |||
3652 | const struct amdgpu_ip_block_version dce_v8_1_ip_block = | ||
3653 | { | ||
3654 | .type = AMD_IP_BLOCK_TYPE_DCE, | ||
3655 | .major = 8, | ||
3656 | .minor = 1, | ||
3657 | .rev = 0, | ||
3658 | .funcs = &dce_v8_0_ip_funcs, | ||
3659 | }; | ||
3660 | |||
3661 | const struct amdgpu_ip_block_version dce_v8_2_ip_block = | ||
3662 | { | ||
3663 | .type = AMD_IP_BLOCK_TYPE_DCE, | ||
3664 | .major = 8, | ||
3665 | .minor = 2, | ||
3666 | .rev = 0, | ||
3667 | .funcs = &dce_v8_0_ip_funcs, | ||
3668 | }; | ||
3669 | |||
3670 | const struct amdgpu_ip_block_version dce_v8_3_ip_block = | ||
3671 | { | ||
3672 | .type = AMD_IP_BLOCK_TYPE_DCE, | ||
3673 | .major = 8, | ||
3674 | .minor = 3, | ||
3675 | .rev = 0, | ||
3676 | .funcs = &dce_v8_0_ip_funcs, | ||
3677 | }; | ||
3678 | |||
3679 | const struct amdgpu_ip_block_version dce_v8_5_ip_block = | ||
3680 | { | ||
3681 | .type = AMD_IP_BLOCK_TYPE_DCE, | ||
3682 | .major = 8, | ||
3683 | .minor = 5, | ||
3684 | .rev = 0, | ||
3685 | .funcs = &dce_v8_0_ip_funcs, | ||
3686 | }; | ||
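
The rework above trades three six-way switch statements for a single hpd_offsets[] table indexed by the HPD pin, with the `type >= adev->mode_info.num_hpd` check standing in for the old default cases. The table itself is defined earlier in dce_v8_0.c and is not part of this hunk, so the standalone sketch below uses made-up register numbers and strides purely to illustrate the pattern:

#include <stdio.h>

enum { AMDGPU_HPD_1, AMDGPU_HPD_2, AMDGPU_HPD_3,
       AMDGPU_HPD_4, AMDGPU_HPD_5, AMDGPU_HPD_6, AMDGPU_HPD_LAST };

#define mmDC_HPD1_INT_CONTROL 0x1898u	/* hypothetical register index */

/* Distance of each pin's register bank from the HPD1 bank (hypothetical stride). */
static const unsigned int hpd_offsets[AMDGPU_HPD_LAST] = {
	0x00, 0x0c, 0x18, 0x24, 0x30, 0x3c,
};

static unsigned int hpd_int_control_reg(unsigned int hpd)
{
	/* one add replaces the six-case switch removed above */
	return mmDC_HPD1_INT_CONTROL + hpd_offsets[hpd];
}

int main(void)
{
	unsigned int hpd;

	for (hpd = AMDGPU_HPD_1; hpd < AMDGPU_HPD_LAST; hpd++)
		printf("HPD%u -> 0x%04x\n", hpd + 1, hpd_int_control_reg(hpd));
	return 0;
}
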
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.h b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.h index 7d0770c3a49b..13b802dd946a 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.h +++ b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.h | |||
@@ -24,7 +24,11 @@ | |||
24 | #ifndef __DCE_V8_0_H__ | 24 | #ifndef __DCE_V8_0_H__ |
25 | #define __DCE_V8_0_H__ | 25 | #define __DCE_V8_0_H__ |
26 | 26 | ||
27 | extern const struct amd_ip_funcs dce_v8_0_ip_funcs; | 27 | extern const struct amdgpu_ip_block_version dce_v8_0_ip_block; |
28 | extern const struct amdgpu_ip_block_version dce_v8_1_ip_block; | ||
29 | extern const struct amdgpu_ip_block_version dce_v8_2_ip_block; | ||
30 | extern const struct amdgpu_ip_block_version dce_v8_3_ip_block; | ||
31 | extern const struct amdgpu_ip_block_version dce_v8_5_ip_block; | ||
28 | 32 | ||
29 | void dce_v8_0_disable_dce(struct amdgpu_device *adev); | 33 | void dce_v8_0_disable_dce(struct amdgpu_device *adev); |
30 | 34 | ||
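
With the header exporting amdgpu_ip_block_version structures instead of a bare amd_ip_funcs, each ASIC revision gets a named block while all five versions share the one (now static) funcs table. A sketch of the consumer side, assuming the amdgpu_ip_block_add() helper introduced elsewhere in this series; the chip-to-version mapping shown here is illustrative, not quoted from the patch:

static int cik_add_dce_block_sketch(struct amdgpu_device *adev)
{
	switch (adev->asic_type) {
	case CHIP_BONAIRE:
		return amdgpu_ip_block_add(adev, &dce_v8_2_ip_block);
	case CHIP_HAWAII:
		return amdgpu_ip_block_add(adev, &dce_v8_5_ip_block);
	case CHIP_KAVERI:
		return amdgpu_ip_block_add(adev, &dce_v8_1_ip_block);
	case CHIP_KABINI:
	case CHIP_MULLINS:
		return amdgpu_ip_block_add(adev, &dce_v8_3_ip_block);
	default:
		/* fall back to the base DCE 8.0 block */
		return amdgpu_ip_block_add(adev, &dce_v8_0_ip_block);
	}
}
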
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_virtual.c b/drivers/gpu/drm/amd/amdgpu/dce_virtual.c index c2bd9f045532..cc85676a68d9 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_virtual.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_virtual.c | |||
@@ -27,6 +27,9 @@ | |||
27 | #include "atom.h" | 27 | #include "atom.h" |
28 | #include "amdgpu_pll.h" | 28 | #include "amdgpu_pll.h" |
29 | #include "amdgpu_connectors.h" | 29 | #include "amdgpu_connectors.h" |
30 | #ifdef CONFIG_DRM_AMDGPU_SI | ||
31 | #include "dce_v6_0.h" | ||
32 | #endif | ||
30 | #ifdef CONFIG_DRM_AMDGPU_CIK | 33 | #ifdef CONFIG_DRM_AMDGPU_CIK |
31 | #include "dce_v8_0.h" | 34 | #include "dce_v8_0.h" |
32 | #endif | 35 | #endif |
@@ -34,11 +37,13 @@ | |||
34 | #include "dce_v11_0.h" | 37 | #include "dce_v11_0.h" |
35 | #include "dce_virtual.h" | 38 | #include "dce_virtual.h" |
36 | 39 | ||
40 | #define DCE_VIRTUAL_VBLANK_PERIOD 16666666 | ||
41 | |||
42 | |||
37 | static void dce_virtual_set_display_funcs(struct amdgpu_device *adev); | 43 | static void dce_virtual_set_display_funcs(struct amdgpu_device *adev); |
38 | static void dce_virtual_set_irq_funcs(struct amdgpu_device *adev); | 44 | static void dce_virtual_set_irq_funcs(struct amdgpu_device *adev); |
39 | static int dce_virtual_pageflip_irq(struct amdgpu_device *adev, | 45 | static int dce_virtual_connector_encoder_init(struct amdgpu_device *adev, |
40 | struct amdgpu_irq_src *source, | 46 | int index); |
41 | struct amdgpu_iv_entry *entry); | ||
42 | 47 | ||
43 | /** | 48 | /** |
44 | * dce_virtual_vblank_wait - vblank wait asic callback. | 49 | * dce_virtual_vblank_wait - vblank wait asic callback. |
@@ -99,6 +104,14 @@ static void dce_virtual_stop_mc_access(struct amdgpu_device *adev, | |||
99 | struct amdgpu_mode_mc_save *save) | 104 | struct amdgpu_mode_mc_save *save) |
100 | { | 105 | { |
101 | switch (adev->asic_type) { | 106 | switch (adev->asic_type) { |
107 | #ifdef CONFIG_DRM_AMDGPU_SI | ||
108 | case CHIP_TAHITI: | ||
109 | case CHIP_PITCAIRN: | ||
110 | case CHIP_VERDE: | ||
111 | case CHIP_OLAND: | ||
112 | dce_v6_0_disable_dce(adev); | ||
113 | break; | ||
114 | #endif | ||
102 | #ifdef CONFIG_DRM_AMDGPU_CIK | 115 | #ifdef CONFIG_DRM_AMDGPU_CIK |
103 | case CHIP_BONAIRE: | 116 | case CHIP_BONAIRE: |
104 | case CHIP_HAWAII: | 117 | case CHIP_HAWAII: |
@@ -119,6 +132,9 @@ static void dce_virtual_stop_mc_access(struct amdgpu_device *adev, | |||
119 | dce_v11_0_disable_dce(adev); | 132 | dce_v11_0_disable_dce(adev); |
120 | break; | 133 | break; |
121 | case CHIP_TOPAZ: | 134 | case CHIP_TOPAZ: |
135 | #ifdef CONFIG_DRM_AMDGPU_SI | ||
136 | case CHIP_HAINAN: | ||
137 | #endif | ||
122 | /* no DCE */ | 138 | /* no DCE */ |
123 | return; | 139 | return; |
124 | default: | 140 | default: |
@@ -195,10 +211,9 @@ static void dce_virtual_crtc_dpms(struct drm_crtc *crtc, int mode) | |||
195 | switch (mode) { | 211 | switch (mode) { |
196 | case DRM_MODE_DPMS_ON: | 212 | case DRM_MODE_DPMS_ON: |
197 | amdgpu_crtc->enabled = true; | 213 | amdgpu_crtc->enabled = true; |
198 | /* Make sure VBLANK and PFLIP interrupts are still enabled */ | 214 | /* Make sure VBLANK interrupts are still enabled */ |
199 | type = amdgpu_crtc_idx_to_irq_type(adev, amdgpu_crtc->crtc_id); | 215 | type = amdgpu_crtc_idx_to_irq_type(adev, amdgpu_crtc->crtc_id); |
200 | amdgpu_irq_update(adev, &adev->crtc_irq, type); | 216 | amdgpu_irq_update(adev, &adev->crtc_irq, type); |
201 | amdgpu_irq_update(adev, &adev->pageflip_irq, type); | ||
202 | drm_vblank_on(dev, amdgpu_crtc->crtc_id); | 217 | drm_vblank_on(dev, amdgpu_crtc->crtc_id); |
203 | break; | 218 | break; |
204 | case DRM_MODE_DPMS_STANDBY: | 219 | case DRM_MODE_DPMS_STANDBY: |
@@ -264,24 +279,6 @@ static bool dce_virtual_crtc_mode_fixup(struct drm_crtc *crtc, | |||
264 | const struct drm_display_mode *mode, | 279 | const struct drm_display_mode *mode, |
265 | struct drm_display_mode *adjusted_mode) | 280 | struct drm_display_mode *adjusted_mode) |
266 | { | 281 | { |
267 | struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); | ||
268 | struct drm_device *dev = crtc->dev; | ||
269 | struct drm_encoder *encoder; | ||
270 | |||
271 | /* assign the encoder to the amdgpu crtc to avoid repeated lookups later */ | ||
272 | list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { | ||
273 | if (encoder->crtc == crtc) { | ||
274 | amdgpu_crtc->encoder = encoder; | ||
275 | amdgpu_crtc->connector = amdgpu_get_connector_for_encoder(encoder); | ||
276 | break; | ||
277 | } | ||
278 | } | ||
279 | if ((amdgpu_crtc->encoder == NULL) || (amdgpu_crtc->connector == NULL)) { | ||
280 | amdgpu_crtc->encoder = NULL; | ||
281 | amdgpu_crtc->connector = NULL; | ||
282 | return false; | ||
283 | } | ||
284 | |||
285 | return true; | 282 | return true; |
286 | } | 283 | } |
287 | 284 | ||
@@ -341,6 +338,7 @@ static int dce_virtual_crtc_init(struct amdgpu_device *adev, int index) | |||
341 | amdgpu_crtc->pll_id = ATOM_PPLL_INVALID; | 338 | amdgpu_crtc->pll_id = ATOM_PPLL_INVALID; |
342 | amdgpu_crtc->encoder = NULL; | 339 | amdgpu_crtc->encoder = NULL; |
343 | amdgpu_crtc->connector = NULL; | 340 | amdgpu_crtc->connector = NULL; |
341 | amdgpu_crtc->vsync_timer_enabled = AMDGPU_IRQ_STATE_DISABLE; | ||
344 | drm_crtc_helper_add(&amdgpu_crtc->base, &dce_virtual_crtc_helper_funcs); | 342 | drm_crtc_helper_add(&amdgpu_crtc->base, &dce_virtual_crtc_helper_funcs); |
345 | 343 | ||
346 | return 0; | 344 | return 0; |
@@ -350,48 +348,128 @@ static int dce_virtual_early_init(void *handle) | |||
350 | { | 348 | { |
351 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; | 349 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; |
352 | 350 | ||
353 | adev->mode_info.vsync_timer_enabled = AMDGPU_IRQ_STATE_DISABLE; | ||
354 | dce_virtual_set_display_funcs(adev); | 351 | dce_virtual_set_display_funcs(adev); |
355 | dce_virtual_set_irq_funcs(adev); | 352 | dce_virtual_set_irq_funcs(adev); |
356 | 353 | ||
357 | adev->mode_info.num_crtc = 1; | ||
358 | adev->mode_info.num_hpd = 1; | 354 | adev->mode_info.num_hpd = 1; |
359 | adev->mode_info.num_dig = 1; | 355 | adev->mode_info.num_dig = 1; |
360 | return 0; | 356 | return 0; |
361 | } | 357 | } |
362 | 358 | ||
363 | static bool dce_virtual_get_connector_info(struct amdgpu_device *adev) | 359 | static struct drm_encoder * |
360 | dce_virtual_encoder(struct drm_connector *connector) | ||
364 | { | 361 | { |
365 | struct amdgpu_i2c_bus_rec ddc_bus; | 362 | int enc_id = connector->encoder_ids[0]; |
366 | struct amdgpu_router router; | 363 | struct drm_encoder *encoder; |
367 | struct amdgpu_hpd hpd; | 364 | int i; |
368 | 365 | ||
369 | /* look up gpio for ddc, hpd */ | 366 | for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) { |
370 | ddc_bus.valid = false; | 367 | if (connector->encoder_ids[i] == 0) |
371 | hpd.hpd = AMDGPU_HPD_NONE; | 368 | break; |
372 | /* needed for aux chan transactions */ | ||
373 | ddc_bus.hpd = hpd.hpd; | ||
374 | 369 | ||
375 | memset(&router, 0, sizeof(router)); | 370 | encoder = drm_encoder_find(connector->dev, connector->encoder_ids[i]); |
376 | router.ddc_valid = false; | 371 | if (!encoder) |
377 | router.cd_valid = false; | 372 | continue; |
378 | amdgpu_display_add_connector(adev, | ||
379 | 0, | ||
380 | ATOM_DEVICE_CRT1_SUPPORT, | ||
381 | DRM_MODE_CONNECTOR_VIRTUAL, &ddc_bus, | ||
382 | CONNECTOR_OBJECT_ID_VIRTUAL, | ||
383 | &hpd, | ||
384 | &router); | ||
385 | 373 | ||
386 | amdgpu_display_add_encoder(adev, ENCODER_VIRTUAL_ENUM_VIRTUAL, | 374 | if (encoder->encoder_type == DRM_MODE_ENCODER_VIRTUAL) |
387 | ATOM_DEVICE_CRT1_SUPPORT, | 375 | return encoder; |
388 | 0); | 376 | } |
389 | 377 | ||
390 | amdgpu_link_encoder_connector(adev->ddev); | 378 | /* pick the first one */ |
379 | if (enc_id) | ||
380 | return drm_encoder_find(connector->dev, enc_id); | ||
381 | return NULL; | ||
382 | } | ||
383 | |||
384 | static int dce_virtual_get_modes(struct drm_connector *connector) | ||
385 | { | ||
386 | struct drm_device *dev = connector->dev; | ||
387 | struct drm_display_mode *mode = NULL; | ||
388 | unsigned i; | ||
389 | static const struct mode_size { | ||
390 | int w; | ||
391 | int h; | ||
392 | } common_modes[17] = { | ||
393 | { 640, 480}, | ||
394 | { 720, 480}, | ||
395 | { 800, 600}, | ||
396 | { 848, 480}, | ||
397 | {1024, 768}, | ||
398 | {1152, 768}, | ||
399 | {1280, 720}, | ||
400 | {1280, 800}, | ||
401 | {1280, 854}, | ||
402 | {1280, 960}, | ||
403 | {1280, 1024}, | ||
404 | {1440, 900}, | ||
405 | {1400, 1050}, | ||
406 | {1680, 1050}, | ||
407 | {1600, 1200}, | ||
408 | {1920, 1080}, | ||
409 | {1920, 1200} | ||
410 | }; | ||
411 | |||
412 | for (i = 0; i < 17; i++) { | ||
413 | mode = drm_cvt_mode(dev, common_modes[i].w, common_modes[i].h, 60, false, false, false); | ||
414 | drm_mode_probed_add(connector, mode); | ||
415 | } | ||
391 | 416 | ||
392 | return true; | 417 | return 0; |
418 | } | ||
419 | |||
420 | static int dce_virtual_mode_valid(struct drm_connector *connector, | ||
421 | struct drm_display_mode *mode) | ||
422 | { | ||
423 | return MODE_OK; | ||
424 | } | ||
425 | |||
426 | static int | ||
427 | dce_virtual_dpms(struct drm_connector *connector, int mode) | ||
428 | { | ||
429 | return 0; | ||
393 | } | 430 | } |
394 | 431 | ||
432 | static enum drm_connector_status | ||
433 | dce_virtual_detect(struct drm_connector *connector, bool force) | ||
434 | { | ||
435 | return connector_status_connected; | ||
436 | } | ||
437 | |||
438 | static int | ||
439 | dce_virtual_set_property(struct drm_connector *connector, | ||
440 | struct drm_property *property, | ||
441 | uint64_t val) | ||
442 | { | ||
443 | return 0; | ||
444 | } | ||
445 | |||
446 | static void dce_virtual_destroy(struct drm_connector *connector) | ||
447 | { | ||
448 | drm_connector_unregister(connector); | ||
449 | drm_connector_cleanup(connector); | ||
450 | kfree(connector); | ||
451 | } | ||
452 | |||
453 | static void dce_virtual_force(struct drm_connector *connector) | ||
454 | { | ||
455 | return; | ||
456 | } | ||
457 | |||
458 | static const struct drm_connector_helper_funcs dce_virtual_connector_helper_funcs = { | ||
459 | .get_modes = dce_virtual_get_modes, | ||
460 | .mode_valid = dce_virtual_mode_valid, | ||
461 | .best_encoder = dce_virtual_encoder, | ||
462 | }; | ||
463 | |||
464 | static const struct drm_connector_funcs dce_virtual_connector_funcs = { | ||
465 | .dpms = dce_virtual_dpms, | ||
466 | .detect = dce_virtual_detect, | ||
467 | .fill_modes = drm_helper_probe_single_connector_modes, | ||
468 | .set_property = dce_virtual_set_property, | ||
469 | .destroy = dce_virtual_destroy, | ||
470 | .force = dce_virtual_force, | ||
471 | }; | ||
472 | |||
395 | static int dce_virtual_sw_init(void *handle) | 473 | static int dce_virtual_sw_init(void *handle) |
396 | { | 474 | { |
397 | int r, i; | 475 | int r, i; |
@@ -420,16 +498,16 @@ static int dce_virtual_sw_init(void *handle) | |||
420 | adev->ddev->mode_config.max_width = 16384; | 498 | adev->ddev->mode_config.max_width = 16384; |
421 | adev->ddev->mode_config.max_height = 16384; | 499 | adev->ddev->mode_config.max_height = 16384; |
422 | 500 | ||
423 | /* allocate crtcs */ | 501 | /* allocate crtcs, encoders, connectors */ |
424 | for (i = 0; i < adev->mode_info.num_crtc; i++) { | 502 | for (i = 0; i < adev->mode_info.num_crtc; i++) { |
425 | r = dce_virtual_crtc_init(adev, i); | 503 | r = dce_virtual_crtc_init(adev, i); |
426 | if (r) | 504 | if (r) |
427 | return r; | 505 | return r; |
506 | r = dce_virtual_connector_encoder_init(adev, i); | ||
507 | if (r) | ||
508 | return r; | ||
428 | } | 509 | } |
429 | 510 | ||
430 | dce_virtual_get_connector_info(adev); | ||
431 | amdgpu_print_display_setup(adev->ddev); | ||
432 | |||
433 | drm_kms_helper_poll_init(adev->ddev); | 511 | drm_kms_helper_poll_init(adev->ddev); |
434 | 512 | ||
435 | adev->mode_info.mode_config_initialized = true; | 513 | adev->mode_info.mode_config_initialized = true; |
@@ -496,7 +574,7 @@ static int dce_virtual_set_powergating_state(void *handle, | |||
496 | return 0; | 574 | return 0; |
497 | } | 575 | } |
498 | 576 | ||
499 | const struct amd_ip_funcs dce_virtual_ip_funcs = { | 577 | static const struct amd_ip_funcs dce_virtual_ip_funcs = { |
500 | .name = "dce_virtual", | 578 | .name = "dce_virtual", |
501 | .early_init = dce_virtual_early_init, | 579 | .early_init = dce_virtual_early_init, |
502 | .late_init = NULL, | 580 | .late_init = NULL, |
@@ -526,8 +604,8 @@ static void dce_virtual_encoder_commit(struct drm_encoder *encoder) | |||
526 | 604 | ||
527 | static void | 605 | static void |
528 | dce_virtual_encoder_mode_set(struct drm_encoder *encoder, | 606 | dce_virtual_encoder_mode_set(struct drm_encoder *encoder, |
529 | struct drm_display_mode *mode, | 607 | struct drm_display_mode *mode, |
530 | struct drm_display_mode *adjusted_mode) | 608 | struct drm_display_mode *adjusted_mode) |
531 | { | 609 | { |
532 | return; | 610 | return; |
533 | } | 611 | } |
@@ -547,10 +625,6 @@ static bool dce_virtual_encoder_mode_fixup(struct drm_encoder *encoder, | |||
547 | const struct drm_display_mode *mode, | 625 | const struct drm_display_mode *mode, |
548 | struct drm_display_mode *adjusted_mode) | 626 | struct drm_display_mode *adjusted_mode) |
549 | { | 627 | { |
550 | |||
551 | /* set the active encoder to connector routing */ | ||
552 | amdgpu_encoder_set_active_device(encoder); | ||
553 | |||
554 | return true; | 628 | return true; |
555 | } | 629 | } |
556 | 630 | ||
@@ -576,45 +650,40 @@ static const struct drm_encoder_funcs dce_virtual_encoder_funcs = { | |||
576 | .destroy = dce_virtual_encoder_destroy, | 650 | .destroy = dce_virtual_encoder_destroy, |
577 | }; | 651 | }; |
578 | 652 | ||
579 | static void dce_virtual_encoder_add(struct amdgpu_device *adev, | 653 | static int dce_virtual_connector_encoder_init(struct amdgpu_device *adev, |
580 | uint32_t encoder_enum, | 654 | int index) |
581 | uint32_t supported_device, | ||
582 | u16 caps) | ||
583 | { | 655 | { |
584 | struct drm_device *dev = adev->ddev; | ||
585 | struct drm_encoder *encoder; | 656 | struct drm_encoder *encoder; |
586 | struct amdgpu_encoder *amdgpu_encoder; | 657 | struct drm_connector *connector; |
587 | 658 | ||
588 | /* see if we already added it */ | 659 | /* add a new encoder */ |
589 | list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { | 660 | encoder = kzalloc(sizeof(struct drm_encoder), GFP_KERNEL); |
590 | amdgpu_encoder = to_amdgpu_encoder(encoder); | 661 | if (!encoder) |
591 | if (amdgpu_encoder->encoder_enum == encoder_enum) { | 662 | return -ENOMEM; |
592 | amdgpu_encoder->devices |= supported_device; | 663 | encoder->possible_crtcs = 1 << index; |
593 | return; | 664 | drm_encoder_init(adev->ddev, encoder, &dce_virtual_encoder_funcs, |
594 | } | 665 | DRM_MODE_ENCODER_VIRTUAL, NULL); |
666 | drm_encoder_helper_add(encoder, &dce_virtual_encoder_helper_funcs); | ||
595 | 667 | ||
668 | connector = kzalloc(sizeof(struct drm_connector), GFP_KERNEL); | ||
669 | if (!connector) { | ||
670 | kfree(encoder); | ||
671 | return -ENOMEM; | ||
596 | } | 672 | } |
597 | 673 | ||
598 | /* add a new one */ | 674 | /* add a new connector */ |
599 | amdgpu_encoder = kzalloc(sizeof(struct amdgpu_encoder), GFP_KERNEL); | 675 | drm_connector_init(adev->ddev, connector, &dce_virtual_connector_funcs, |
600 | if (!amdgpu_encoder) | 676 | DRM_MODE_CONNECTOR_VIRTUAL); |
601 | return; | 677 | drm_connector_helper_add(connector, &dce_virtual_connector_helper_funcs); |
678 | connector->display_info.subpixel_order = SubPixelHorizontalRGB; | ||
679 | connector->interlace_allowed = false; | ||
680 | connector->doublescan_allowed = false; | ||
681 | drm_connector_register(connector); | ||
602 | 682 | ||
603 | encoder = &amdgpu_encoder->base; | 683 | /* link them */ |
604 | encoder->possible_crtcs = 0x1; | 684 | drm_mode_connector_attach_encoder(connector, encoder); |
605 | amdgpu_encoder->enc_priv = NULL; | 685 | |
606 | amdgpu_encoder->encoder_enum = encoder_enum; | 686 | return 0; |
607 | amdgpu_encoder->encoder_id = (encoder_enum & OBJECT_ID_MASK) >> OBJECT_ID_SHIFT; | ||
608 | amdgpu_encoder->devices = supported_device; | ||
609 | amdgpu_encoder->rmx_type = RMX_OFF; | ||
610 | amdgpu_encoder->underscan_type = UNDERSCAN_OFF; | ||
611 | amdgpu_encoder->is_ext_encoder = false; | ||
612 | amdgpu_encoder->caps = caps; | ||
613 | |||
614 | drm_encoder_init(dev, encoder, &dce_virtual_encoder_funcs, | ||
615 | DRM_MODE_ENCODER_VIRTUAL, NULL); | ||
616 | drm_encoder_helper_add(encoder, &dce_virtual_encoder_helper_funcs); | ||
617 | DRM_INFO("[FM]encoder: %d is VIRTUAL\n", amdgpu_encoder->encoder_id); | ||
618 | } | 687 | } |
619 | 688 | ||
620 | static const struct amdgpu_display_funcs dce_virtual_display_funcs = { | 689 | static const struct amdgpu_display_funcs dce_virtual_display_funcs = { |
@@ -630,8 +699,8 @@ static const struct amdgpu_display_funcs dce_virtual_display_funcs = { | |||
630 | .hpd_get_gpio_reg = &dce_virtual_hpd_get_gpio_reg, | 699 | .hpd_get_gpio_reg = &dce_virtual_hpd_get_gpio_reg, |
631 | .page_flip = &dce_virtual_page_flip, | 700 | .page_flip = &dce_virtual_page_flip, |
632 | .page_flip_get_scanoutpos = &dce_virtual_crtc_get_scanoutpos, | 701 | .page_flip_get_scanoutpos = &dce_virtual_crtc_get_scanoutpos, |
633 | .add_encoder = &dce_virtual_encoder_add, | 702 | .add_encoder = NULL, |
634 | .add_connector = &amdgpu_connector_add, | 703 | .add_connector = NULL, |
635 | .stop_mc_access = &dce_virtual_stop_mc_access, | 704 | .stop_mc_access = &dce_virtual_stop_mc_access, |
636 | .resume_mc_access = &dce_virtual_resume_mc_access, | 705 | .resume_mc_access = &dce_virtual_resume_mc_access, |
637 | }; | 706 | }; |
@@ -642,107 +711,13 @@ static void dce_virtual_set_display_funcs(struct amdgpu_device *adev) | |||
642 | adev->mode_info.funcs = &dce_virtual_display_funcs; | 711 | adev->mode_info.funcs = &dce_virtual_display_funcs; |
643 | } | 712 | } |
644 | 713 | ||
645 | static enum hrtimer_restart dce_virtual_vblank_timer_handle(struct hrtimer *vblank_timer) | 714 | static int dce_virtual_pageflip(struct amdgpu_device *adev, |
646 | { | 715 | unsigned crtc_id) |
647 | struct amdgpu_mode_info *mode_info = container_of(vblank_timer, struct amdgpu_mode_info ,vblank_timer); | ||
648 | struct amdgpu_device *adev = container_of(mode_info, struct amdgpu_device ,mode_info); | ||
649 | unsigned crtc = 0; | ||
650 | drm_handle_vblank(adev->ddev, crtc); | ||
651 | dce_virtual_pageflip_irq(adev, NULL, NULL); | ||
652 | hrtimer_start(vblank_timer, ktime_set(0, DCE_VIRTUAL_VBLANK_PERIOD), HRTIMER_MODE_REL); | ||
653 | return HRTIMER_NORESTART; | ||
654 | } | ||
655 | |||
656 | static void dce_virtual_set_crtc_vblank_interrupt_state(struct amdgpu_device *adev, | ||
657 | int crtc, | ||
658 | enum amdgpu_interrupt_state state) | ||
659 | { | ||
660 | if (crtc >= adev->mode_info.num_crtc) { | ||
661 | DRM_DEBUG("invalid crtc %d\n", crtc); | ||
662 | return; | ||
663 | } | ||
664 | |||
665 | if (state && !adev->mode_info.vsync_timer_enabled) { | ||
666 | DRM_DEBUG("Enable software vsync timer\n"); | ||
667 | hrtimer_init(&adev->mode_info.vblank_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); | ||
668 | hrtimer_set_expires(&adev->mode_info.vblank_timer, ktime_set(0, DCE_VIRTUAL_VBLANK_PERIOD)); | ||
669 | adev->mode_info.vblank_timer.function = dce_virtual_vblank_timer_handle; | ||
670 | hrtimer_start(&adev->mode_info.vblank_timer, ktime_set(0, DCE_VIRTUAL_VBLANK_PERIOD), HRTIMER_MODE_REL); | ||
671 | } else if (!state && adev->mode_info.vsync_timer_enabled) { | ||
672 | DRM_DEBUG("Disable software vsync timer\n"); | ||
673 | hrtimer_cancel(&adev->mode_info.vblank_timer); | ||
674 | } | ||
675 | |||
676 | adev->mode_info.vsync_timer_enabled = state; | ||
677 | DRM_DEBUG("[FM]set crtc %d vblank interrupt state %d\n", crtc, state); | ||
678 | } | ||
679 | |||
680 | |||
681 | static int dce_virtual_set_crtc_irq_state(struct amdgpu_device *adev, | ||
682 | struct amdgpu_irq_src *source, | ||
683 | unsigned type, | ||
684 | enum amdgpu_interrupt_state state) | ||
685 | { | ||
686 | switch (type) { | ||
687 | case AMDGPU_CRTC_IRQ_VBLANK1: | ||
688 | dce_virtual_set_crtc_vblank_interrupt_state(adev, 0, state); | ||
689 | break; | ||
690 | default: | ||
691 | break; | ||
692 | } | ||
693 | return 0; | ||
694 | } | ||
695 | |||
696 | static void dce_virtual_crtc_vblank_int_ack(struct amdgpu_device *adev, | ||
697 | int crtc) | ||
698 | { | ||
699 | if (crtc >= adev->mode_info.num_crtc) { | ||
700 | DRM_DEBUG("invalid crtc %d\n", crtc); | ||
701 | return; | ||
702 | } | ||
703 | } | ||
704 | |||
705 | static int dce_virtual_crtc_irq(struct amdgpu_device *adev, | ||
706 | struct amdgpu_irq_src *source, | ||
707 | struct amdgpu_iv_entry *entry) | ||
708 | { | ||
709 | unsigned crtc = 0; | ||
710 | unsigned irq_type = AMDGPU_CRTC_IRQ_VBLANK1; | ||
711 | |||
712 | dce_virtual_crtc_vblank_int_ack(adev, crtc); | ||
713 | |||
714 | if (amdgpu_irq_enabled(adev, source, irq_type)) { | ||
715 | drm_handle_vblank(adev->ddev, crtc); | ||
716 | } | ||
717 | dce_virtual_pageflip_irq(adev, NULL, NULL); | ||
718 | DRM_DEBUG("IH: D%d vblank\n", crtc + 1); | ||
719 | return 0; | ||
720 | } | ||
721 | |||
722 | static int dce_virtual_set_pageflip_irq_state(struct amdgpu_device *adev, | ||
723 | struct amdgpu_irq_src *src, | ||
724 | unsigned type, | ||
725 | enum amdgpu_interrupt_state state) | ||
726 | { | ||
727 | if (type >= adev->mode_info.num_crtc) { | ||
728 | DRM_ERROR("invalid pageflip crtc %d\n", type); | ||
729 | return -EINVAL; | ||
730 | } | ||
731 | DRM_DEBUG("[FM]set pageflip irq type %d state %d\n", type, state); | ||
732 | |||
733 | return 0; | ||
734 | } | ||
735 | |||
736 | static int dce_virtual_pageflip_irq(struct amdgpu_device *adev, | ||
737 | struct amdgpu_irq_src *source, | ||
738 | struct amdgpu_iv_entry *entry) | ||
739 | { | 716 | { |
740 | unsigned long flags; | 717 | unsigned long flags; |
741 | unsigned crtc_id = 0; | ||
742 | struct amdgpu_crtc *amdgpu_crtc; | 718 | struct amdgpu_crtc *amdgpu_crtc; |
743 | struct amdgpu_flip_work *works; | 719 | struct amdgpu_flip_work *works; |
744 | 720 | ||
745 | crtc_id = 0; | ||
746 | amdgpu_crtc = adev->mode_info.crtcs[crtc_id]; | 721 | amdgpu_crtc = adev->mode_info.crtcs[crtc_id]; |
747 | 722 | ||
748 | if (crtc_id >= adev->mode_info.num_crtc) { | 723 | if (crtc_id >= adev->mode_info.num_crtc) { |
@@ -781,22 +756,79 @@ static int dce_virtual_pageflip_irq(struct amdgpu_device *adev, | |||
781 | return 0; | 756 | return 0; |
782 | } | 757 | } |
783 | 758 | ||
759 | static enum hrtimer_restart dce_virtual_vblank_timer_handle(struct hrtimer *vblank_timer) | ||
760 | { | ||
761 | struct amdgpu_crtc *amdgpu_crtc = container_of(vblank_timer, | ||
762 | struct amdgpu_crtc, vblank_timer); | ||
763 | struct drm_device *ddev = amdgpu_crtc->base.dev; | ||
764 | struct amdgpu_device *adev = ddev->dev_private; | ||
765 | |||
766 | drm_handle_vblank(ddev, amdgpu_crtc->crtc_id); | ||
767 | dce_virtual_pageflip(adev, amdgpu_crtc->crtc_id); | ||
768 | hrtimer_start(vblank_timer, ktime_set(0, DCE_VIRTUAL_VBLANK_PERIOD), | ||
769 | HRTIMER_MODE_REL); | ||
770 | |||
771 | return HRTIMER_NORESTART; | ||
772 | } | ||
773 | |||
774 | static void dce_virtual_set_crtc_vblank_interrupt_state(struct amdgpu_device *adev, | ||
775 | int crtc, | ||
776 | enum amdgpu_interrupt_state state) | ||
777 | { | ||
778 | if (crtc >= adev->mode_info.num_crtc) { | ||
779 | DRM_DEBUG("invalid crtc %d\n", crtc); | ||
780 | return; | ||
781 | } | ||
782 | |||
783 | if (state && !adev->mode_info.crtcs[crtc]->vsync_timer_enabled) { | ||
784 | DRM_DEBUG("Enable software vsync timer\n"); | ||
785 | hrtimer_init(&adev->mode_info.crtcs[crtc]->vblank_timer, | ||
786 | CLOCK_MONOTONIC, HRTIMER_MODE_REL); | ||
787 | hrtimer_set_expires(&adev->mode_info.crtcs[crtc]->vblank_timer, | ||
788 | ktime_set(0, DCE_VIRTUAL_VBLANK_PERIOD)); | ||
789 | adev->mode_info.crtcs[crtc]->vblank_timer.function = | ||
790 | dce_virtual_vblank_timer_handle; | ||
791 | hrtimer_start(&adev->mode_info.crtcs[crtc]->vblank_timer, | ||
792 | ktime_set(0, DCE_VIRTUAL_VBLANK_PERIOD), HRTIMER_MODE_REL); | ||
793 | } else if (!state && adev->mode_info.crtcs[crtc]->vsync_timer_enabled) { | ||
794 | DRM_DEBUG("Disable software vsync timer\n"); | ||
795 | hrtimer_cancel(&adev->mode_info.crtcs[crtc]->vblank_timer); | ||
796 | } | ||
797 | |||
798 | adev->mode_info.crtcs[crtc]->vsync_timer_enabled = state; | ||
799 | DRM_DEBUG("[FM]set crtc %d vblank interrupt state %d\n", crtc, state); | ||
800 | } | ||
801 | |||
802 | |||
803 | static int dce_virtual_set_crtc_irq_state(struct amdgpu_device *adev, | ||
804 | struct amdgpu_irq_src *source, | ||
805 | unsigned type, | ||
806 | enum amdgpu_interrupt_state state) | ||
807 | { | ||
808 | if (type > AMDGPU_CRTC_IRQ_VBLANK6) | ||
809 | return -EINVAL; | ||
810 | |||
811 | dce_virtual_set_crtc_vblank_interrupt_state(adev, type, state); | ||
812 | |||
813 | return 0; | ||
814 | } | ||
815 | |||
784 | static const struct amdgpu_irq_src_funcs dce_virtual_crtc_irq_funcs = { | 816 | static const struct amdgpu_irq_src_funcs dce_virtual_crtc_irq_funcs = { |
785 | .set = dce_virtual_set_crtc_irq_state, | 817 | .set = dce_virtual_set_crtc_irq_state, |
786 | .process = dce_virtual_crtc_irq, | 818 | .process = NULL, |
787 | }; | ||
788 | |||
789 | static const struct amdgpu_irq_src_funcs dce_virtual_pageflip_irq_funcs = { | ||
790 | .set = dce_virtual_set_pageflip_irq_state, | ||
791 | .process = dce_virtual_pageflip_irq, | ||
792 | }; | 819 | }; |
793 | 820 | ||
794 | static void dce_virtual_set_irq_funcs(struct amdgpu_device *adev) | 821 | static void dce_virtual_set_irq_funcs(struct amdgpu_device *adev) |
795 | { | 822 | { |
796 | adev->crtc_irq.num_types = AMDGPU_CRTC_IRQ_LAST; | 823 | adev->crtc_irq.num_types = AMDGPU_CRTC_IRQ_LAST; |
797 | adev->crtc_irq.funcs = &dce_virtual_crtc_irq_funcs; | 824 | adev->crtc_irq.funcs = &dce_virtual_crtc_irq_funcs; |
798 | |||
799 | adev->pageflip_irq.num_types = AMDGPU_PAGEFLIP_IRQ_LAST; | ||
800 | adev->pageflip_irq.funcs = &dce_virtual_pageflip_irq_funcs; | ||
801 | } | 825 | } |
802 | 826 | ||
827 | const struct amdgpu_ip_block_version dce_virtual_ip_block = | ||
828 | { | ||
829 | .type = AMD_IP_BLOCK_TYPE_DCE, | ||
830 | .major = 1, | ||
831 | .minor = 0, | ||
832 | .rev = 0, | ||
833 | .funcs = &dce_virtual_ip_funcs, | ||
834 | }; | ||
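
The vblank timer handler above returns HRTIMER_NORESTART and re-arms itself with hrtimer_start(); an equivalent and arguably more conventional shape for a periodic hrtimer is to push the expiry forward and return HRTIMER_RESTART. A minimal sketch of that variant (pageflip handling elided), assuming the same per-CRTC vblank_timer field this patch adds:

static enum hrtimer_restart dce_virtual_vblank_sketch(struct hrtimer *timer)
{
	struct amdgpu_crtc *amdgpu_crtc = container_of(timer,
					struct amdgpu_crtc, vblank_timer);

	drm_handle_vblank(amdgpu_crtc->base.dev, amdgpu_crtc->crtc_id);
	/* advance the expiry one 60 Hz frame and stay armed */
	hrtimer_forward_now(timer, ns_to_ktime(DCE_VIRTUAL_VBLANK_PERIOD));
	return HRTIMER_RESTART;
}

Either shape gives one software vblank per 16666666 ns; moving the timer from mode_info into amdgpu_crtc is what lets each CRTC tick independently.
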
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_virtual.h b/drivers/gpu/drm/amd/amdgpu/dce_virtual.h index e239243f6ebc..ed422012c8c6 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_virtual.h +++ b/drivers/gpu/drm/amd/amdgpu/dce_virtual.h | |||
@@ -24,8 +24,7 @@ | |||
24 | #ifndef __DCE_VIRTUAL_H__ | 24 | #ifndef __DCE_VIRTUAL_H__ |
25 | #define __DCE_VIRTUAL_H__ | 25 | #define __DCE_VIRTUAL_H__ |
26 | 26 | ||
27 | extern const struct amd_ip_funcs dce_virtual_ip_funcs; | 27 | extern const struct amdgpu_ip_block_version dce_virtual_ip_block; |
28 | #define DCE_VIRTUAL_VBLANK_PERIOD 16666666 | ||
29 | 28 | ||
30 | #endif | 29 | #endif |
31 | 30 | ||
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c index 40abb6b81c09..96dd05dca694 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c | |||
@@ -1940,7 +1940,7 @@ static int gfx_v6_0_cp_resume(struct amdgpu_device *adev) | |||
1940 | 1940 | ||
1941 | static void gfx_v6_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring) | 1941 | static void gfx_v6_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring) |
1942 | { | 1942 | { |
1943 | int usepfp = (ring->type == AMDGPU_RING_TYPE_GFX); | 1943 | int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX); |
1944 | uint32_t seq = ring->fence_drv.sync_seq; | 1944 | uint32_t seq = ring->fence_drv.sync_seq; |
1945 | uint64_t addr = ring->fence_drv.gpu_addr; | 1945 | uint64_t addr = ring->fence_drv.gpu_addr; |
1946 | 1946 | ||
@@ -1966,7 +1966,7 @@ static void gfx_v6_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring) | |||
1966 | static void gfx_v6_0_ring_emit_vm_flush(struct amdgpu_ring *ring, | 1966 | static void gfx_v6_0_ring_emit_vm_flush(struct amdgpu_ring *ring, |
1967 | unsigned vm_id, uint64_t pd_addr) | 1967 | unsigned vm_id, uint64_t pd_addr) |
1968 | { | 1968 | { |
1969 | int usepfp = (ring->type == AMDGPU_RING_TYPE_GFX); | 1969 | int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX); |
1970 | 1970 | ||
1971 | /* write new base address */ | 1971 | /* write new base address */ |
1972 | amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3)); | 1972 | amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3)); |
@@ -2814,33 +2814,6 @@ static void gfx_v6_ring_emit_cntxcntl(struct amdgpu_ring *ring, uint32_t flags) | |||
2814 | amdgpu_ring_write(ring, 0); | 2814 | amdgpu_ring_write(ring, 0); |
2815 | } | 2815 | } |
2816 | 2816 | ||
2817 | static unsigned gfx_v6_0_ring_get_emit_ib_size(struct amdgpu_ring *ring) | ||
2818 | { | ||
2819 | return | ||
2820 | 6; /* gfx_v6_0_ring_emit_ib */ | ||
2821 | } | ||
2822 | |||
2823 | static unsigned gfx_v6_0_ring_get_dma_frame_size_gfx(struct amdgpu_ring *ring) | ||
2824 | { | ||
2825 | return | ||
2826 | 5 + /* gfx_v6_0_ring_emit_hdp_flush */ | ||
2827 | 5 + /* gfx_v6_0_ring_emit_hdp_invalidate */ | ||
2828 | 14 + 14 + 14 + /* gfx_v6_0_ring_emit_fence x3 for user fence, vm fence */ | ||
2829 | 7 + 4 + /* gfx_v6_0_ring_emit_pipeline_sync */ | ||
2830 | 17 + 6 + /* gfx_v6_0_ring_emit_vm_flush */ | ||
2831 | 3; /* gfx_v6_ring_emit_cntxcntl */ | ||
2832 | } | ||
2833 | |||
2834 | static unsigned gfx_v6_0_ring_get_dma_frame_size_compute(struct amdgpu_ring *ring) | ||
2835 | { | ||
2836 | return | ||
2837 | 5 + /* gfx_v6_0_ring_emit_hdp_flush */ | ||
2838 | 5 + /* gfx_v6_0_ring_emit_hdp_invalidate */ | ||
2839 | 7 + /* gfx_v6_0_ring_emit_pipeline_sync */ | ||
2840 | 17 + /* gfx_v6_0_ring_emit_vm_flush */ | ||
2841 | 14 + 14 + 14; /* gfx_v6_0_ring_emit_fence x3 for user fence, vm fence */ | ||
2842 | } | ||
2843 | |||
2844 | static const struct amdgpu_gfx_funcs gfx_v6_0_gfx_funcs = { | 2817 | static const struct amdgpu_gfx_funcs gfx_v6_0_gfx_funcs = { |
2845 | .get_gpu_clock_counter = &gfx_v6_0_get_gpu_clock_counter, | 2818 | .get_gpu_clock_counter = &gfx_v6_0_get_gpu_clock_counter, |
2846 | .select_se_sh = &gfx_v6_0_select_se_sh, | 2819 | .select_se_sh = &gfx_v6_0_select_se_sh, |
@@ -2896,9 +2869,7 @@ static int gfx_v6_0_sw_init(void *handle) | |||
2896 | ring->ring_obj = NULL; | 2869 | ring->ring_obj = NULL; |
2897 | sprintf(ring->name, "gfx"); | 2870 | sprintf(ring->name, "gfx"); |
2898 | r = amdgpu_ring_init(adev, ring, 1024, | 2871 | r = amdgpu_ring_init(adev, ring, 1024, |
2899 | 0x80000000, 0xf, | 2872 | &adev->gfx.eop_irq, AMDGPU_CP_IRQ_GFX_EOP); |
2900 | &adev->gfx.eop_irq, AMDGPU_CP_IRQ_GFX_EOP, | ||
2901 | AMDGPU_RING_TYPE_GFX); | ||
2902 | if (r) | 2873 | if (r) |
2903 | return r; | 2874 | return r; |
2904 | } | 2875 | } |
@@ -2920,9 +2891,7 @@ static int gfx_v6_0_sw_init(void *handle) | |||
2920 | sprintf(ring->name, "comp %d.%d.%d", ring->me, ring->pipe, ring->queue); | 2891 | sprintf(ring->name, "comp %d.%d.%d", ring->me, ring->pipe, ring->queue); |
2921 | irq_type = AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP + ring->pipe; | 2892 | irq_type = AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP + ring->pipe; |
2922 | r = amdgpu_ring_init(adev, ring, 1024, | 2893 | r = amdgpu_ring_init(adev, ring, 1024, |
2923 | 0x80000000, 0xf, | 2894 | &adev->gfx.eop_irq, irq_type); |
2924 | &adev->gfx.eop_irq, irq_type, | ||
2925 | AMDGPU_RING_TYPE_COMPUTE); | ||
2926 | if (r) | 2895 | if (r) |
2927 | return r; | 2896 | return r; |
2928 | } | 2897 | } |
@@ -3237,7 +3206,7 @@ static int gfx_v6_0_set_powergating_state(void *handle, | |||
3237 | return 0; | 3206 | return 0; |
3238 | } | 3207 | } |
3239 | 3208 | ||
3240 | const struct amd_ip_funcs gfx_v6_0_ip_funcs = { | 3209 | static const struct amd_ip_funcs gfx_v6_0_ip_funcs = { |
3241 | .name = "gfx_v6_0", | 3210 | .name = "gfx_v6_0", |
3242 | .early_init = gfx_v6_0_early_init, | 3211 | .early_init = gfx_v6_0_early_init, |
3243 | .late_init = NULL, | 3212 | .late_init = NULL, |
@@ -3255,10 +3224,20 @@ const struct amd_ip_funcs gfx_v6_0_ip_funcs = { | |||
3255 | }; | 3224 | }; |
3256 | 3225 | ||
3257 | static const struct amdgpu_ring_funcs gfx_v6_0_ring_funcs_gfx = { | 3226 | static const struct amdgpu_ring_funcs gfx_v6_0_ring_funcs_gfx = { |
3227 | .type = AMDGPU_RING_TYPE_GFX, | ||
3228 | .align_mask = 0xff, | ||
3229 | .nop = 0x80000000, | ||
3258 | .get_rptr = gfx_v6_0_ring_get_rptr, | 3230 | .get_rptr = gfx_v6_0_ring_get_rptr, |
3259 | .get_wptr = gfx_v6_0_ring_get_wptr, | 3231 | .get_wptr = gfx_v6_0_ring_get_wptr, |
3260 | .set_wptr = gfx_v6_0_ring_set_wptr_gfx, | 3232 | .set_wptr = gfx_v6_0_ring_set_wptr_gfx, |
3261 | .parse_cs = NULL, | 3233 | .emit_frame_size = |
3234 | 5 + /* gfx_v6_0_ring_emit_hdp_flush */ | ||
3235 | 5 + /* gfx_v6_0_ring_emit_hdp_invalidate */ | ||
3236 | 14 + 14 + 14 + /* gfx_v6_0_ring_emit_fence x3 for user fence, vm fence */ | ||
3237 | 7 + 4 + /* gfx_v6_0_ring_emit_pipeline_sync */ | ||
3238 | 17 + 6 + /* gfx_v6_0_ring_emit_vm_flush */ | ||
3239 | 3, /* gfx_v6_ring_emit_cntxcntl */ | ||
3240 | .emit_ib_size = 6, /* gfx_v6_0_ring_emit_ib */ | ||
3262 | .emit_ib = gfx_v6_0_ring_emit_ib, | 3241 | .emit_ib = gfx_v6_0_ring_emit_ib, |
3263 | .emit_fence = gfx_v6_0_ring_emit_fence, | 3242 | .emit_fence = gfx_v6_0_ring_emit_fence, |
3264 | .emit_pipeline_sync = gfx_v6_0_ring_emit_pipeline_sync, | 3243 | .emit_pipeline_sync = gfx_v6_0_ring_emit_pipeline_sync, |
@@ -3269,15 +3248,22 @@ static const struct amdgpu_ring_funcs gfx_v6_0_ring_funcs_gfx = { | |||
3269 | .test_ib = gfx_v6_0_ring_test_ib, | 3248 | .test_ib = gfx_v6_0_ring_test_ib, |
3270 | .insert_nop = amdgpu_ring_insert_nop, | 3249 | .insert_nop = amdgpu_ring_insert_nop, |
3271 | .emit_cntxcntl = gfx_v6_ring_emit_cntxcntl, | 3250 | .emit_cntxcntl = gfx_v6_ring_emit_cntxcntl, |
3272 | .get_emit_ib_size = gfx_v6_0_ring_get_emit_ib_size, | ||
3273 | .get_dma_frame_size = gfx_v6_0_ring_get_dma_frame_size_gfx, | ||
3274 | }; | 3251 | }; |
3275 | 3252 | ||
3276 | static const struct amdgpu_ring_funcs gfx_v6_0_ring_funcs_compute = { | 3253 | static const struct amdgpu_ring_funcs gfx_v6_0_ring_funcs_compute = { |
3254 | .type = AMDGPU_RING_TYPE_COMPUTE, | ||
3255 | .align_mask = 0xff, | ||
3256 | .nop = 0x80000000, | ||
3277 | .get_rptr = gfx_v6_0_ring_get_rptr, | 3257 | .get_rptr = gfx_v6_0_ring_get_rptr, |
3278 | .get_wptr = gfx_v6_0_ring_get_wptr, | 3258 | .get_wptr = gfx_v6_0_ring_get_wptr, |
3279 | .set_wptr = gfx_v6_0_ring_set_wptr_compute, | 3259 | .set_wptr = gfx_v6_0_ring_set_wptr_compute, |
3280 | .parse_cs = NULL, | 3260 | .emit_frame_size = |
3261 | 5 + /* gfx_v6_0_ring_emit_hdp_flush */ | ||
3262 | 5 + /* gfx_v6_0_ring_emit_hdp_invalidate */ | ||
3263 | 7 + /* gfx_v6_0_ring_emit_pipeline_sync */ | ||
3264 | 17 + /* gfx_v6_0_ring_emit_vm_flush */ | ||
3265 | 14 + 14 + 14, /* gfx_v6_0_ring_emit_fence x3 for user fence, vm fence */ | ||
3266 | .emit_ib_size = 6, /* gfx_v6_0_ring_emit_ib */ | ||
3281 | .emit_ib = gfx_v6_0_ring_emit_ib, | 3267 | .emit_ib = gfx_v6_0_ring_emit_ib, |
3282 | .emit_fence = gfx_v6_0_ring_emit_fence, | 3268 | .emit_fence = gfx_v6_0_ring_emit_fence, |
3283 | .emit_pipeline_sync = gfx_v6_0_ring_emit_pipeline_sync, | 3269 | .emit_pipeline_sync = gfx_v6_0_ring_emit_pipeline_sync, |
@@ -3287,8 +3273,6 @@ static const struct amdgpu_ring_funcs gfx_v6_0_ring_funcs_compute = { | |||
3287 | .test_ring = gfx_v6_0_ring_test_ring, | 3273 | .test_ring = gfx_v6_0_ring_test_ring, |
3288 | .test_ib = gfx_v6_0_ring_test_ib, | 3274 | .test_ib = gfx_v6_0_ring_test_ib, |
3289 | .insert_nop = amdgpu_ring_insert_nop, | 3275 | .insert_nop = amdgpu_ring_insert_nop, |
3290 | .get_emit_ib_size = gfx_v6_0_ring_get_emit_ib_size, | ||
3291 | .get_dma_frame_size = gfx_v6_0_ring_get_dma_frame_size_compute, | ||
3292 | }; | 3276 | }; |
3293 | 3277 | ||
3294 | static void gfx_v6_0_set_ring_funcs(struct amdgpu_device *adev) | 3278 | static void gfx_v6_0_set_ring_funcs(struct amdgpu_device *adev) |
@@ -3360,3 +3344,12 @@ static void gfx_v6_0_get_cu_info(struct amdgpu_device *adev) | |||
3360 | cu_info->number = active_cu_number; | 3344 | cu_info->number = active_cu_number; |
3361 | cu_info->ao_cu_mask = ao_cu_mask; | 3345 | cu_info->ao_cu_mask = ao_cu_mask; |
3362 | } | 3346 | } |
3347 | |||
3348 | const struct amdgpu_ip_block_version gfx_v6_0_ip_block = | ||
3349 | { | ||
3350 | .type = AMD_IP_BLOCK_TYPE_GFX, | ||
3351 | .major = 6, | ||
3352 | .minor = 0, | ||
3353 | .rev = 0, | ||
3354 | .funcs = &gfx_v6_0_ip_funcs, | ||
3355 | }; | ||
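
Turning the get_emit_ib_size()/get_dma_frame_size() callbacks into the constant .emit_ib_size/.emit_frame_size fields lets common code reserve worst-case ring space from plain data. Roughly, and with an illustrative helper name rather than the exact submission-path code, the consumer looks like:

static int reserve_frame_space_sketch(struct amdgpu_ring *ring,
				      unsigned num_ibs)
{
	/* worst case: fixed per-frame overhead plus one IB packet each */
	unsigned ndw = ring->funcs->emit_frame_size +
		       num_ibs * ring->funcs->emit_ib_size;

	return amdgpu_ring_alloc(ring, ndw);
}

Hoisting .type, .align_mask and .nop into the same table is what allows amdgpu_ring_init() to drop three of its parameters in the hunks above.
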
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.h b/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.h index b9657e72b248..ced6fc42f688 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.h +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.h | |||
@@ -24,6 +24,6 @@ | |||
24 | #ifndef __GFX_V6_0_H__ | 24 | #ifndef __GFX_V6_0_H__ |
25 | #define __GFX_V6_0_H__ | 25 | #define __GFX_V6_0_H__ |
26 | 26 | ||
27 | extern const struct amd_ip_funcs gfx_v6_0_ip_funcs; | 27 | extern const struct amdgpu_ip_block_version gfx_v6_0_ip_block; |
28 | 28 | ||
29 | #endif | 29 | #endif |
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c index 71116da9e782..903aa240e946 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c | |||
@@ -2077,9 +2077,9 @@ static int gfx_v7_0_ring_test_ring(struct amdgpu_ring *ring) | |||
2077 | static void gfx_v7_0_ring_emit_hdp_flush(struct amdgpu_ring *ring) | 2077 | static void gfx_v7_0_ring_emit_hdp_flush(struct amdgpu_ring *ring) |
2078 | { | 2078 | { |
2079 | u32 ref_and_mask; | 2079 | u32 ref_and_mask; |
2080 | int usepfp = ring->type == AMDGPU_RING_TYPE_COMPUTE ? 0 : 1; | 2080 | int usepfp = ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE ? 0 : 1; |
2081 | 2081 | ||
2082 | if (ring->type == AMDGPU_RING_TYPE_COMPUTE) { | 2082 | if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE) { |
2083 | switch (ring->me) { | 2083 | switch (ring->me) { |
2084 | case 1: | 2084 | case 1: |
2085 | ref_and_mask = GPU_HDP_FLUSH_DONE__CP2_MASK << ring->pipe; | 2085 | ref_and_mask = GPU_HDP_FLUSH_DONE__CP2_MASK << ring->pipe; |
@@ -3222,7 +3222,7 @@ static int gfx_v7_0_cp_resume(struct amdgpu_device *adev) | |||
3222 | */ | 3222 | */ |
3223 | static void gfx_v7_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring) | 3223 | static void gfx_v7_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring) |
3224 | { | 3224 | { |
3225 | int usepfp = (ring->type == AMDGPU_RING_TYPE_GFX); | 3225 | int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX); |
3226 | uint32_t seq = ring->fence_drv.sync_seq; | 3226 | uint32_t seq = ring->fence_drv.sync_seq; |
3227 | uint64_t addr = ring->fence_drv.gpu_addr; | 3227 | uint64_t addr = ring->fence_drv.gpu_addr; |
3228 | 3228 | ||
@@ -3262,7 +3262,7 @@ static void gfx_v7_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring) | |||
3262 | static void gfx_v7_0_ring_emit_vm_flush(struct amdgpu_ring *ring, | 3262 | static void gfx_v7_0_ring_emit_vm_flush(struct amdgpu_ring *ring, |
3263 | unsigned vm_id, uint64_t pd_addr) | 3263 | unsigned vm_id, uint64_t pd_addr) |
3264 | { | 3264 | { |
3265 | int usepfp = (ring->type == AMDGPU_RING_TYPE_GFX); | 3265 | int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX); |
3266 | 3266 | ||
3267 | amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3)); | 3267 | amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3)); |
3268 | amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(usepfp) | | 3268 | amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(usepfp) | |
@@ -3391,7 +3391,8 @@ static int gfx_v7_0_rlc_init(struct amdgpu_device *adev) | |||
3391 | if (adev->gfx.rlc.save_restore_obj == NULL) { | 3391 | if (adev->gfx.rlc.save_restore_obj == NULL) { |
3392 | r = amdgpu_bo_create(adev, dws * 4, PAGE_SIZE, true, | 3392 | r = amdgpu_bo_create(adev, dws * 4, PAGE_SIZE, true, |
3393 | AMDGPU_GEM_DOMAIN_VRAM, | 3393 | AMDGPU_GEM_DOMAIN_VRAM, |
3394 | AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED, | 3394 | AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED | |
3395 | AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS, | ||
3395 | NULL, NULL, | 3396 | NULL, NULL, |
3396 | &adev->gfx.rlc.save_restore_obj); | 3397 | &adev->gfx.rlc.save_restore_obj); |
3397 | if (r) { | 3398 | if (r) { |
@@ -3435,7 +3436,8 @@ static int gfx_v7_0_rlc_init(struct amdgpu_device *adev) | |||
3435 | if (adev->gfx.rlc.clear_state_obj == NULL) { | 3436 | if (adev->gfx.rlc.clear_state_obj == NULL) { |
3436 | r = amdgpu_bo_create(adev, dws * 4, PAGE_SIZE, true, | 3437 | r = amdgpu_bo_create(adev, dws * 4, PAGE_SIZE, true, |
3437 | AMDGPU_GEM_DOMAIN_VRAM, | 3438 | AMDGPU_GEM_DOMAIN_VRAM, |
3438 | AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED, | 3439 | AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED | |
3440 | AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS, | ||
3439 | NULL, NULL, | 3441 | NULL, NULL, |
3440 | &adev->gfx.rlc.clear_state_obj); | 3442 | &adev->gfx.rlc.clear_state_obj); |
3441 | if (r) { | 3443 | if (r) { |
@@ -3475,7 +3477,8 @@ static int gfx_v7_0_rlc_init(struct amdgpu_device *adev) | |||
3475 | if (adev->gfx.rlc.cp_table_obj == NULL) { | 3477 | if (adev->gfx.rlc.cp_table_obj == NULL) { |
3476 | r = amdgpu_bo_create(adev, adev->gfx.rlc.cp_table_size, PAGE_SIZE, true, | 3478 | r = amdgpu_bo_create(adev, adev->gfx.rlc.cp_table_size, PAGE_SIZE, true, |
3477 | AMDGPU_GEM_DOMAIN_VRAM, | 3479 | AMDGPU_GEM_DOMAIN_VRAM, |
3478 | AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED, | 3480 | AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED | |
3481 | AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS, | ||
3479 | NULL, NULL, | 3482 | NULL, NULL, |
3480 | &adev->gfx.rlc.cp_table_obj); | 3483 | &adev->gfx.rlc.cp_table_obj); |
3481 | if (r) { | 3484 | if (r) { |
@@ -4354,44 +4357,40 @@ static void gfx_v7_0_ring_emit_gds_switch(struct amdgpu_ring *ring, | |||
4354 | amdgpu_ring_write(ring, (1 << (oa_size + oa_base)) - (1 << oa_base)); | 4357 | amdgpu_ring_write(ring, (1 << (oa_size + oa_base)) - (1 << oa_base)); |
4355 | } | 4358 | } |
4356 | 4359 | ||
4357 | static unsigned gfx_v7_0_ring_get_emit_ib_size_gfx(struct amdgpu_ring *ring) | 4360 | static uint32_t wave_read_ind(struct amdgpu_device *adev, uint32_t simd, uint32_t wave, uint32_t address) |
4358 | { | 4361 | { |
4359 | return | 4362 | WREG32(mmSQ_IND_INDEX, (wave & 0xF) | ((simd & 0x3) << 4) | (address << 16) | (1 << 13)); |
4360 | 4; /* gfx_v7_0_ring_emit_ib_gfx */ | 4363 | return RREG32(mmSQ_IND_DATA); |
4361 | } | 4364 | } |
4362 | 4365 | ||
4363 | static unsigned gfx_v7_0_ring_get_dma_frame_size_gfx(struct amdgpu_ring *ring) | 4366 | static void gfx_v7_0_read_wave_data(struct amdgpu_device *adev, uint32_t simd, uint32_t wave, uint32_t *dst, int *no_fields) |
4364 | { | 4367 | { |
4365 | return | 4368 | /* type 0 wave data */ |
4366 | 20 + /* gfx_v7_0_ring_emit_gds_switch */ | 4369 | dst[(*no_fields)++] = 0; |
4367 | 7 + /* gfx_v7_0_ring_emit_hdp_flush */ | 4370 | dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_STATUS); |
4368 | 5 + /* gfx_v7_0_ring_emit_hdp_invalidate */ | 4371 | dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_PC_LO); |
4369 | 12 + 12 + 12 + /* gfx_v7_0_ring_emit_fence_gfx x3 for user fence, vm fence */ | 4372 | dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_PC_HI); |
4370 | 7 + 4 + /* gfx_v7_0_ring_emit_pipeline_sync */ | 4373 | dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_EXEC_LO); |
4371 | 17 + 6 + /* gfx_v7_0_ring_emit_vm_flush */ | 4374 | dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_EXEC_HI); |
4372 | 3; /* gfx_v7_ring_emit_cntxcntl */ | 4375 | dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_HW_ID); |
4373 | } | 4376 | dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_INST_DW0); |
4374 | 4377 | dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_INST_DW1); | |
4375 | static unsigned gfx_v7_0_ring_get_emit_ib_size_compute(struct amdgpu_ring *ring) | 4378 | dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_GPR_ALLOC); |
4376 | { | 4379 | dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_LDS_ALLOC); |
4377 | return | 4380 | dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_TRAPSTS); |
4378 | 4; /* gfx_v7_0_ring_emit_ib_compute */ | 4381 | dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_IB_STS); |
4379 | } | 4382 | dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_TBA_LO); |
4380 | 4383 | dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_TBA_HI); | |
4381 | static unsigned gfx_v7_0_ring_get_dma_frame_size_compute(struct amdgpu_ring *ring) | 4384 | dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_TMA_LO); |
4382 | { | 4385 | dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_TMA_HI); |
4383 | return | 4386 | dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_IB_DBG0); |
4384 | 20 + /* gfx_v7_0_ring_emit_gds_switch */ | 4387 | dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_M0); |
4385 | 7 + /* gfx_v7_0_ring_emit_hdp_flush */ | ||
4386 | 5 + /* gfx_v7_0_ring_emit_hdp_invalidate */ | ||
4387 | 7 + /* gfx_v7_0_ring_emit_pipeline_sync */ | ||
4388 | 17 + /* gfx_v7_0_ring_emit_vm_flush */ | ||
4389 | 7 + 7 + 7; /* gfx_v7_0_ring_emit_fence_compute x3 for user fence, vm fence */ | ||
4390 | } | 4388 | } |
4391 | 4389 | ||
4392 | static const struct amdgpu_gfx_funcs gfx_v7_0_gfx_funcs = { | 4390 | static const struct amdgpu_gfx_funcs gfx_v7_0_gfx_funcs = { |
4393 | .get_gpu_clock_counter = &gfx_v7_0_get_gpu_clock_counter, | 4391 | .get_gpu_clock_counter = &gfx_v7_0_get_gpu_clock_counter, |
4394 | .select_se_sh = &gfx_v7_0_select_se_sh, | 4392 | .select_se_sh = &gfx_v7_0_select_se_sh, |
4393 | .read_wave_data = &gfx_v7_0_read_wave_data, | ||
4395 | }; | 4394 | }; |
4396 | 4395 | ||
4397 | static const struct amdgpu_rlc_funcs gfx_v7_0_rlc_funcs = { | 4396 | static const struct amdgpu_rlc_funcs gfx_v7_0_rlc_funcs = { |
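
wave_read_ind() above is the classic index/data indirect-register idiom: a single write to SQ_IND_INDEX selects wave[3:0], simd[1:0] and the dword address (bits 31:16), with bit 13 forcing the read (the FORCE_READ naming for that bit is an assumption from the register headers), after which SQ_IND_DATA returns the value. A sketch of the natural multi-dword extension, using only the field layout visible in this hunk:

static void wave_read_regs_sketch(struct amdgpu_device *adev,
				  uint32_t simd, uint32_t wave,
				  uint32_t start, uint32_t count,
				  uint32_t *dst)
{
	uint32_t i;

	for (i = 0; i < count; i++) {
		/* select wave, simd and the i-th dword, then force the read */
		WREG32(mmSQ_IND_INDEX, (wave & 0xF) |
				       ((simd & 0x3) << 4) |
				       ((start + i) << 16) |
				       (1 << 13));
		dst[i] = RREG32(mmSQ_IND_DATA);
	}
}
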
@@ -4643,9 +4642,7 @@ static int gfx_v7_0_sw_init(void *handle) | |||
4643 | ring->ring_obj = NULL; | 4642 | ring->ring_obj = NULL; |
4644 | sprintf(ring->name, "gfx"); | 4643 | sprintf(ring->name, "gfx"); |
4645 | r = amdgpu_ring_init(adev, ring, 1024, | 4644 | r = amdgpu_ring_init(adev, ring, 1024, |
4646 | PACKET3(PACKET3_NOP, 0x3FFF), 0xf, | 4645 | &adev->gfx.eop_irq, AMDGPU_CP_IRQ_GFX_EOP); |
4647 | &adev->gfx.eop_irq, AMDGPU_CP_IRQ_GFX_EOP, | ||
4648 | AMDGPU_RING_TYPE_GFX); | ||
4649 | if (r) | 4646 | if (r) |
4650 | return r; | 4647 | return r; |
4651 | } | 4648 | } |
@@ -4670,9 +4667,7 @@ static int gfx_v7_0_sw_init(void *handle) | |||
4670 | irq_type = AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP + ring->pipe; | 4667 | irq_type = AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP + ring->pipe; |
4671 | /* type-2 packets are deprecated on MEC, use type-3 instead */ | 4668 | /* type-2 packets are deprecated on MEC, use type-3 instead */ |
4672 | r = amdgpu_ring_init(adev, ring, 1024, | 4669 | r = amdgpu_ring_init(adev, ring, 1024, |
4673 | PACKET3(PACKET3_NOP, 0x3FFF), 0xf, | 4670 | &adev->gfx.eop_irq, irq_type); |
4674 | &adev->gfx.eop_irq, irq_type, | ||
4675 | AMDGPU_RING_TYPE_COMPUTE); | ||
4676 | if (r) | 4671 | if (r) |
4677 | return r; | 4672 | return r; |
4678 | } | 4673 | } |
@@ -5123,7 +5118,7 @@ static int gfx_v7_0_set_powergating_state(void *handle, | |||
5123 | return 0; | 5118 | return 0; |
5124 | } | 5119 | } |
5125 | 5120 | ||
5126 | const struct amd_ip_funcs gfx_v7_0_ip_funcs = { | 5121 | static const struct amd_ip_funcs gfx_v7_0_ip_funcs = { |
5127 | .name = "gfx_v7_0", | 5122 | .name = "gfx_v7_0", |
5128 | .early_init = gfx_v7_0_early_init, | 5123 | .early_init = gfx_v7_0_early_init, |
5129 | .late_init = gfx_v7_0_late_init, | 5124 | .late_init = gfx_v7_0_late_init, |
@@ -5141,10 +5136,21 @@ const struct amd_ip_funcs gfx_v7_0_ip_funcs = { | |||
5141 | }; | 5136 | }; |
5142 | 5137 | ||
5143 | static const struct amdgpu_ring_funcs gfx_v7_0_ring_funcs_gfx = { | 5138 | static const struct amdgpu_ring_funcs gfx_v7_0_ring_funcs_gfx = { |
5139 | .type = AMDGPU_RING_TYPE_GFX, | ||
5140 | .align_mask = 0xff, | ||
5141 | .nop = PACKET3(PACKET3_NOP, 0x3FFF), | ||
5144 | .get_rptr = gfx_v7_0_ring_get_rptr, | 5142 | .get_rptr = gfx_v7_0_ring_get_rptr, |
5145 | .get_wptr = gfx_v7_0_ring_get_wptr_gfx, | 5143 | .get_wptr = gfx_v7_0_ring_get_wptr_gfx, |
5146 | .set_wptr = gfx_v7_0_ring_set_wptr_gfx, | 5144 | .set_wptr = gfx_v7_0_ring_set_wptr_gfx, |
5147 | .parse_cs = NULL, | 5145 | .emit_frame_size = |
5146 | 20 + /* gfx_v7_0_ring_emit_gds_switch */ | ||
5147 | 7 + /* gfx_v7_0_ring_emit_hdp_flush */ | ||
5148 | 5 + /* gfx_v7_0_ring_emit_hdp_invalidate */ | ||
5149 | 12 + 12 + 12 + /* gfx_v7_0_ring_emit_fence_gfx x3 for user fence, vm fence */ | ||
5150 | 7 + 4 + /* gfx_v7_0_ring_emit_pipeline_sync */ | ||
5151 | 17 + 6 + /* gfx_v7_0_ring_emit_vm_flush */ | ||
5152 | 3, /* gfx_v7_ring_emit_cntxcntl */ | ||
5153 | .emit_ib_size = 4, /* gfx_v7_0_ring_emit_ib_gfx */ | ||
5148 | .emit_ib = gfx_v7_0_ring_emit_ib_gfx, | 5154 | .emit_ib = gfx_v7_0_ring_emit_ib_gfx, |
5149 | .emit_fence = gfx_v7_0_ring_emit_fence_gfx, | 5155 | .emit_fence = gfx_v7_0_ring_emit_fence_gfx, |
5150 | .emit_pipeline_sync = gfx_v7_0_ring_emit_pipeline_sync, | 5156 | .emit_pipeline_sync = gfx_v7_0_ring_emit_pipeline_sync, |
@@ -5157,15 +5163,23 @@ static const struct amdgpu_ring_funcs gfx_v7_0_ring_funcs_gfx = { | |||
5157 | .insert_nop = amdgpu_ring_insert_nop, | 5163 | .insert_nop = amdgpu_ring_insert_nop, |
5158 | .pad_ib = amdgpu_ring_generic_pad_ib, | 5164 | .pad_ib = amdgpu_ring_generic_pad_ib, |
5159 | .emit_cntxcntl = gfx_v7_ring_emit_cntxcntl, | 5165 | .emit_cntxcntl = gfx_v7_ring_emit_cntxcntl, |
5160 | .get_emit_ib_size = gfx_v7_0_ring_get_emit_ib_size_gfx, | ||
5161 | .get_dma_frame_size = gfx_v7_0_ring_get_dma_frame_size_gfx, | ||
5162 | }; | 5166 | }; |
5163 | 5167 | ||
5164 | static const struct amdgpu_ring_funcs gfx_v7_0_ring_funcs_compute = { | 5168 | static const struct amdgpu_ring_funcs gfx_v7_0_ring_funcs_compute = { |
5169 | .type = AMDGPU_RING_TYPE_COMPUTE, | ||
5170 | .align_mask = 0xff, | ||
5171 | .nop = PACKET3(PACKET3_NOP, 0x3FFF), | ||
5165 | .get_rptr = gfx_v7_0_ring_get_rptr, | 5172 | .get_rptr = gfx_v7_0_ring_get_rptr, |
5166 | .get_wptr = gfx_v7_0_ring_get_wptr_compute, | 5173 | .get_wptr = gfx_v7_0_ring_get_wptr_compute, |
5167 | .set_wptr = gfx_v7_0_ring_set_wptr_compute, | 5174 | .set_wptr = gfx_v7_0_ring_set_wptr_compute, |
5168 | .parse_cs = NULL, | 5175 | .emit_frame_size = |
5176 | 20 + /* gfx_v7_0_ring_emit_gds_switch */ | ||
5177 | 7 + /* gfx_v7_0_ring_emit_hdp_flush */ | ||
5178 | 5 + /* gfx_v7_0_ring_emit_hdp_invalidate */ | ||
5179 | 7 + /* gfx_v7_0_ring_emit_pipeline_sync */ | ||
5180 | 17 + /* gfx_v7_0_ring_emit_vm_flush */ | ||
5181 | 7 + 7 + 7, /* gfx_v7_0_ring_emit_fence_compute x3 for user fence, vm fence */ | ||
5182 | .emit_ib_size = 4, /* gfx_v7_0_ring_emit_ib_compute */ | ||
5169 | .emit_ib = gfx_v7_0_ring_emit_ib_compute, | 5183 | .emit_ib = gfx_v7_0_ring_emit_ib_compute, |
5170 | .emit_fence = gfx_v7_0_ring_emit_fence_compute, | 5184 | .emit_fence = gfx_v7_0_ring_emit_fence_compute, |
5171 | .emit_pipeline_sync = gfx_v7_0_ring_emit_pipeline_sync, | 5185 | .emit_pipeline_sync = gfx_v7_0_ring_emit_pipeline_sync, |
@@ -5177,8 +5191,6 @@ static const struct amdgpu_ring_funcs gfx_v7_0_ring_funcs_compute = { | |||
5177 | .test_ib = gfx_v7_0_ring_test_ib, | 5191 | .test_ib = gfx_v7_0_ring_test_ib, |
5178 | .insert_nop = amdgpu_ring_insert_nop, | 5192 | .insert_nop = amdgpu_ring_insert_nop, |
5179 | .pad_ib = amdgpu_ring_generic_pad_ib, | 5193 | .pad_ib = amdgpu_ring_generic_pad_ib, |
5180 | .get_emit_ib_size = gfx_v7_0_ring_get_emit_ib_size_compute, | ||
5181 | .get_dma_frame_size = gfx_v7_0_ring_get_dma_frame_size_compute, | ||
5182 | }; | 5194 | }; |
5183 | 5195 | ||
5184 | static void gfx_v7_0_set_ring_funcs(struct amdgpu_device *adev) | 5196 | static void gfx_v7_0_set_ring_funcs(struct amdgpu_device *adev) |
@@ -5289,3 +5301,39 @@ static void gfx_v7_0_get_cu_info(struct amdgpu_device *adev) | |||
5289 | cu_info->number = active_cu_number; | 5301 | cu_info->number = active_cu_number; |
5290 | cu_info->ao_cu_mask = ao_cu_mask; | 5302 | cu_info->ao_cu_mask = ao_cu_mask; |
5291 | } | 5303 | } |
5304 | |||
5305 | const struct amdgpu_ip_block_version gfx_v7_0_ip_block = | ||
5306 | { | ||
5307 | .type = AMD_IP_BLOCK_TYPE_GFX, | ||
5308 | .major = 7, | ||
5309 | .minor = 0, | ||
5310 | .rev = 0, | ||
5311 | .funcs = &gfx_v7_0_ip_funcs, | ||
5312 | }; | ||
5313 | |||
5314 | const struct amdgpu_ip_block_version gfx_v7_1_ip_block = | ||
5315 | { | ||
5316 | .type = AMD_IP_BLOCK_TYPE_GFX, | ||
5317 | .major = 7, | ||
5318 | .minor = 1, | ||
5319 | .rev = 0, | ||
5320 | .funcs = &gfx_v7_0_ip_funcs, | ||
5321 | }; | ||
5322 | |||
5323 | const struct amdgpu_ip_block_version gfx_v7_2_ip_block = | ||
5324 | { | ||
5325 | .type = AMD_IP_BLOCK_TYPE_GFX, | ||
5326 | .major = 7, | ||
5327 | .minor = 2, | ||
5328 | .rev = 0, | ||
5329 | .funcs = &gfx_v7_0_ip_funcs, | ||
5330 | }; | ||
5331 | |||
5332 | const struct amdgpu_ip_block_version gfx_v7_3_ip_block = | ||
5333 | { | ||
5334 | .type = AMD_IP_BLOCK_TYPE_GFX, | ||
5335 | .major = 7, | ||
5336 | .minor = 3, | ||
5337 | .rev = 0, | ||
5338 | .funcs = &gfx_v7_0_ip_funcs, | ||
5339 | }; | ||
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.h b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.h index 94e3ea147c26..2f5164cc0e53 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.h +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.h | |||
@@ -24,6 +24,9 @@ | |||
24 | #ifndef __GFX_V7_0_H__ | 24 | #ifndef __GFX_V7_0_H__ |
25 | #define __GFX_V7_0_H__ | 25 | #define __GFX_V7_0_H__ |
26 | 26 | ||
27 | extern const struct amd_ip_funcs gfx_v7_0_ip_funcs; | 27 | extern const struct amdgpu_ip_block_version gfx_v7_0_ip_block; |
28 | extern const struct amdgpu_ip_block_version gfx_v7_1_ip_block; | ||
29 | extern const struct amdgpu_ip_block_version gfx_v7_2_ip_block; | ||
30 | extern const struct amdgpu_ip_block_version gfx_v7_3_ip_block; | ||
28 | 31 | ||
29 | #endif | 32 | #endif |
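
The header change is the visible half of the second pattern running through this diff: drivers stop exporting their amd_ip_funcs table (it becomes static in the .c file) and instead export one amdgpu_ip_block_version per hardware revision, all pointing at the same implementation. A hypothetical, self-contained sketch of why that helps: several minor revisions advertise distinct version numbers while sharing one funcs table.

#include <stdio.h>

struct ip_funcs { const char *name; };

struct ip_block_version {
	int major, minor, rev;
	const struct ip_funcs *funcs;  /* shared implementation */
};

static const struct ip_funcs gfx_funcs = { .name = "gfx_v7_0" };

/* Four revisions, one implementation; mirrors gfx_v7_{0,1,2,3}_ip_block */
static const struct ip_block_version gfx_v7[] = {
	{ 7, 0, 0, &gfx_funcs },
	{ 7, 1, 0, &gfx_funcs },
	{ 7, 2, 0, &gfx_funcs },
	{ 7, 3, 0, &gfx_funcs },
};

int main(void)
{
	for (unsigned i = 0; i < sizeof(gfx_v7) / sizeof(gfx_v7[0]); i++)
		printf("%s v%d.%d\n", gfx_v7[i].funcs->name,
		       gfx_v7[i].major, gfx_v7[i].minor);
	return 0;
}

The same conversion repeats verbatim for gmc_v6/v7/v8, iceland_ih, kv_dpm, and the SDMA blocks below.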
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c index ee6a48a09214..1c2544f314c0 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c | |||
@@ -1058,6 +1058,19 @@ static int gfx_v8_0_init_microcode(struct amdgpu_device *adev) | |||
1058 | adev->firmware.fw_size += | 1058 | adev->firmware.fw_size += |
1059 | ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE); | 1059 | ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE); |
1060 | 1060 | ||
1061 | /* we also need to account for the JT (jump table) */ | ||
1062 | cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data; | ||
1063 | adev->firmware.fw_size += | ||
1064 | ALIGN(le32_to_cpu(cp_hdr->jt_size) << 2, PAGE_SIZE); | ||
1065 | |||
1066 | if (amdgpu_sriov_vf(adev)) { | ||
1067 | info = &adev->firmware.ucode[AMDGPU_UCODE_ID_STORAGE]; | ||
1068 | info->ucode_id = AMDGPU_UCODE_ID_STORAGE; | ||
1069 | info->fw = adev->gfx.mec_fw; | ||
1070 | adev->firmware.fw_size += | ||
1071 | ALIGN(64 * PAGE_SIZE, PAGE_SIZE); | ||
1072 | } | ||
1073 | |||
1061 | if (adev->gfx.mec2_fw) { | 1074 | if (adev->gfx.mec2_fw) { |
1062 | info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_MEC2]; | 1075 | info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_MEC2]; |
1063 | info->ucode_id = AMDGPU_UCODE_ID_CP_MEC2; | 1076 | info->ucode_id = AMDGPU_UCODE_ID_CP_MEC2; |
@@ -1127,34 +1140,8 @@ static void gfx_v8_0_get_csb_buffer(struct amdgpu_device *adev, | |||
1127 | buffer[count++] = cpu_to_le32(PACKET3(PACKET3_SET_CONTEXT_REG, 2)); | 1140 | buffer[count++] = cpu_to_le32(PACKET3(PACKET3_SET_CONTEXT_REG, 2)); |
1128 | buffer[count++] = cpu_to_le32(mmPA_SC_RASTER_CONFIG - | 1141 | buffer[count++] = cpu_to_le32(mmPA_SC_RASTER_CONFIG - |
1129 | PACKET3_SET_CONTEXT_REG_START); | 1142 | PACKET3_SET_CONTEXT_REG_START); |
1130 | switch (adev->asic_type) { | 1143 | buffer[count++] = cpu_to_le32(adev->gfx.config.rb_config[0][0].raster_config); |
1131 | case CHIP_TONGA: | 1144 | buffer[count++] = cpu_to_le32(adev->gfx.config.rb_config[0][0].raster_config_1); |
1132 | case CHIP_POLARIS10: | ||
1133 | buffer[count++] = cpu_to_le32(0x16000012); | ||
1134 | buffer[count++] = cpu_to_le32(0x0000002A); | ||
1135 | break; | ||
1136 | case CHIP_POLARIS11: | ||
1137 | buffer[count++] = cpu_to_le32(0x16000012); | ||
1138 | buffer[count++] = cpu_to_le32(0x00000000); | ||
1139 | break; | ||
1140 | case CHIP_FIJI: | ||
1141 | buffer[count++] = cpu_to_le32(0x3a00161a); | ||
1142 | buffer[count++] = cpu_to_le32(0x0000002e); | ||
1143 | break; | ||
1144 | case CHIP_TOPAZ: | ||
1145 | case CHIP_CARRIZO: | ||
1146 | buffer[count++] = cpu_to_le32(0x00000002); | ||
1147 | buffer[count++] = cpu_to_le32(0x00000000); | ||
1148 | break; | ||
1149 | case CHIP_STONEY: | ||
1150 | buffer[count++] = cpu_to_le32(0x00000000); | ||
1151 | buffer[count++] = cpu_to_le32(0x00000000); | ||
1152 | break; | ||
1153 | default: | ||
1154 | buffer[count++] = cpu_to_le32(0x00000000); | ||
1155 | buffer[count++] = cpu_to_le32(0x00000000); | ||
1156 | break; | ||
1157 | } | ||
1158 | 1145 | ||
1159 | buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0)); | 1146 | buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0)); |
1160 | buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_END_CLEAR_STATE); | 1147 | buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_END_CLEAR_STATE); |
@@ -1273,7 +1260,8 @@ static int gfx_v8_0_rlc_init(struct amdgpu_device *adev) | |||
1273 | if (adev->gfx.rlc.clear_state_obj == NULL) { | 1260 | if (adev->gfx.rlc.clear_state_obj == NULL) { |
1274 | r = amdgpu_bo_create(adev, dws * 4, PAGE_SIZE, true, | 1261 | r = amdgpu_bo_create(adev, dws * 4, PAGE_SIZE, true, |
1275 | AMDGPU_GEM_DOMAIN_VRAM, | 1262 | AMDGPU_GEM_DOMAIN_VRAM, |
1276 | AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED, | 1263 | AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED | |
1264 | AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS, | ||
1277 | NULL, NULL, | 1265 | NULL, NULL, |
1278 | &adev->gfx.rlc.clear_state_obj); | 1266 | &adev->gfx.rlc.clear_state_obj); |
1279 | if (r) { | 1267 | if (r) { |
@@ -1315,7 +1303,8 @@ static int gfx_v8_0_rlc_init(struct amdgpu_device *adev) | |||
1315 | if (adev->gfx.rlc.cp_table_obj == NULL) { | 1303 | if (adev->gfx.rlc.cp_table_obj == NULL) { |
1316 | r = amdgpu_bo_create(adev, adev->gfx.rlc.cp_table_size, PAGE_SIZE, true, | 1304 | r = amdgpu_bo_create(adev, adev->gfx.rlc.cp_table_size, PAGE_SIZE, true, |
1317 | AMDGPU_GEM_DOMAIN_VRAM, | 1305 | AMDGPU_GEM_DOMAIN_VRAM, |
1318 | AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED, | 1306 | AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED | |
1307 | AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS, | ||
1319 | NULL, NULL, | 1308 | NULL, NULL, |
1320 | &adev->gfx.rlc.cp_table_obj); | 1309 | &adev->gfx.rlc.cp_table_obj); |
1321 | if (r) { | 1310 | if (r) { |
@@ -2045,10 +2034,8 @@ static int gfx_v8_0_sw_init(void *handle) | |||
2045 | ring->doorbell_index = AMDGPU_DOORBELL_GFX_RING0; | 2034 | ring->doorbell_index = AMDGPU_DOORBELL_GFX_RING0; |
2046 | } | 2035 | } |
2047 | 2036 | ||
2048 | r = amdgpu_ring_init(adev, ring, 1024, | 2037 | r = amdgpu_ring_init(adev, ring, 1024, &adev->gfx.eop_irq, |
2049 | PACKET3(PACKET3_NOP, 0x3FFF), 0xf, | 2038 | AMDGPU_CP_IRQ_GFX_EOP); |
2050 | &adev->gfx.eop_irq, AMDGPU_CP_IRQ_GFX_EOP, | ||
2051 | AMDGPU_RING_TYPE_GFX); | ||
2052 | if (r) | 2039 | if (r) |
2053 | return r; | 2040 | return r; |
2054 | } | 2041 | } |
@@ -2072,10 +2059,8 @@ static int gfx_v8_0_sw_init(void *handle) | |||
2072 | sprintf(ring->name, "comp_%d.%d.%d", ring->me, ring->pipe, ring->queue); | 2059 | sprintf(ring->name, "comp_%d.%d.%d", ring->me, ring->pipe, ring->queue); |
2073 | irq_type = AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP + ring->pipe; | 2060 | irq_type = AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP + ring->pipe; |
2074 | /* type-2 packets are deprecated on MEC, use type-3 instead */ | 2061 | /* type-2 packets are deprecated on MEC, use type-3 instead */ |
2075 | r = amdgpu_ring_init(adev, ring, 1024, | 2062 | r = amdgpu_ring_init(adev, ring, 1024, &adev->gfx.eop_irq, |
2076 | PACKET3(PACKET3_NOP, 0x3FFF), 0xf, | 2063 | irq_type); |
2077 | &adev->gfx.eop_irq, irq_type, | ||
2078 | AMDGPU_RING_TYPE_COMPUTE); | ||
2079 | if (r) | 2064 | if (r) |
2080 | return r; | 2065 | return r; |
2081 | } | 2066 | } |
@@ -3679,6 +3664,21 @@ static void gfx_v8_0_setup_rb(struct amdgpu_device *adev) | |||
3679 | num_rb_pipes); | 3664 | num_rb_pipes); |
3680 | } | 3665 | } |
3681 | 3666 | ||
3667 | /* cache the values for userspace */ | ||
3668 | for (i = 0; i < adev->gfx.config.max_shader_engines; i++) { | ||
3669 | for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) { | ||
3670 | gfx_v8_0_select_se_sh(adev, i, j, 0xffffffff); | ||
3671 | adev->gfx.config.rb_config[i][j].rb_backend_disable = | ||
3672 | RREG32(mmCC_RB_BACKEND_DISABLE); | ||
3673 | adev->gfx.config.rb_config[i][j].user_rb_backend_disable = | ||
3674 | RREG32(mmGC_USER_RB_BACKEND_DISABLE); | ||
3675 | adev->gfx.config.rb_config[i][j].raster_config = | ||
3676 | RREG32(mmPA_SC_RASTER_CONFIG); | ||
3677 | adev->gfx.config.rb_config[i][j].raster_config_1 = | ||
3678 | RREG32(mmPA_SC_RASTER_CONFIG_1); | ||
3679 | } | ||
3680 | } | ||
3681 | gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff); | ||
3682 | mutex_unlock(&adev->grbm_idx_mutex); | 3682 | mutex_unlock(&adev->grbm_idx_mutex); |
3683 | } | 3683 | } |
3684 | 3684 | ||
@@ -4331,7 +4331,7 @@ static int gfx_v8_0_cp_gfx_resume(struct amdgpu_device *adev) | |||
4331 | struct amdgpu_ring *ring; | 4331 | struct amdgpu_ring *ring; |
4332 | u32 tmp; | 4332 | u32 tmp; |
4333 | u32 rb_bufsz; | 4333 | u32 rb_bufsz; |
4334 | u64 rb_addr, rptr_addr; | 4334 | u64 rb_addr, rptr_addr, wptr_gpu_addr; |
4335 | int r; | 4335 | int r; |
4336 | 4336 | ||
4337 | /* Set the write pointer delay */ | 4337 | /* Set the write pointer delay */ |
@@ -4362,6 +4362,9 @@ static int gfx_v8_0_cp_gfx_resume(struct amdgpu_device *adev) | |||
4362 | WREG32(mmCP_RB0_RPTR_ADDR, lower_32_bits(rptr_addr)); | 4362 | WREG32(mmCP_RB0_RPTR_ADDR, lower_32_bits(rptr_addr)); |
4363 | WREG32(mmCP_RB0_RPTR_ADDR_HI, upper_32_bits(rptr_addr) & 0xFF); | 4363 | WREG32(mmCP_RB0_RPTR_ADDR_HI, upper_32_bits(rptr_addr) & 0xFF); |
4364 | 4364 | ||
4365 | wptr_gpu_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4); | ||
4366 | WREG32(mmCP_RB_WPTR_POLL_ADDR_LO, lower_32_bits(wptr_gpu_addr)); | ||
4367 | WREG32(mmCP_RB_WPTR_POLL_ADDR_HI, upper_32_bits(wptr_gpu_addr)); | ||
4365 | mdelay(1); | 4368 | mdelay(1); |
4366 | WREG32(mmCP_RB0_CNTL, tmp); | 4369 | WREG32(mmCP_RB0_CNTL, tmp); |
4367 | 4370 | ||
@@ -5438,9 +5441,41 @@ static void gfx_v8_0_ring_emit_gds_switch(struct amdgpu_ring *ring, | |||
5438 | amdgpu_ring_write(ring, (1 << (oa_size + oa_base)) - (1 << oa_base)); | 5441 | amdgpu_ring_write(ring, (1 << (oa_size + oa_base)) - (1 << oa_base)); |
5439 | } | 5442 | } |
5440 | 5443 | ||
5444 | static uint32_t wave_read_ind(struct amdgpu_device *adev, uint32_t simd, uint32_t wave, uint32_t address) | ||
5445 | { | ||
5446 | WREG32(mmSQ_IND_INDEX, (wave & 0xF) | ((simd & 0x3) << 4) | (address << 16) | (1 << 13)); | ||
5447 | return RREG32(mmSQ_IND_DATA); | ||
5448 | } | ||
5449 | |||
5450 | static void gfx_v8_0_read_wave_data(struct amdgpu_device *adev, uint32_t simd, uint32_t wave, uint32_t *dst, int *no_fields) | ||
5451 | { | ||
5452 | /* type 0 wave data */ | ||
5453 | dst[(*no_fields)++] = 0; | ||
5454 | dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_STATUS); | ||
5455 | dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_PC_LO); | ||
5456 | dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_PC_HI); | ||
5457 | dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_EXEC_LO); | ||
5458 | dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_EXEC_HI); | ||
5459 | dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_HW_ID); | ||
5460 | dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_INST_DW0); | ||
5461 | dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_INST_DW1); | ||
5462 | dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_GPR_ALLOC); | ||
5463 | dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_LDS_ALLOC); | ||
5464 | dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_TRAPSTS); | ||
5465 | dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_IB_STS); | ||
5466 | dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_TBA_LO); | ||
5467 | dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_TBA_HI); | ||
5468 | dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_TMA_LO); | ||
5469 | dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_TMA_HI); | ||
5470 | dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_IB_DBG0); | ||
5471 | dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_M0); | ||
5472 | } | ||
5473 | |||
5474 | |||
5441 | static const struct amdgpu_gfx_funcs gfx_v8_0_gfx_funcs = { | 5475 | static const struct amdgpu_gfx_funcs gfx_v8_0_gfx_funcs = { |
5442 | .get_gpu_clock_counter = &gfx_v8_0_get_gpu_clock_counter, | 5476 | .get_gpu_clock_counter = &gfx_v8_0_get_gpu_clock_counter, |
5443 | .select_se_sh = &gfx_v8_0_select_se_sh, | 5477 | .select_se_sh = &gfx_v8_0_select_se_sh, |
5478 | .read_wave_data = &gfx_v8_0_read_wave_data, | ||
5444 | }; | 5479 | }; |
5445 | 5480 | ||
5446 | static int gfx_v8_0_early_init(void *handle) | 5481 | static int gfx_v8_0_early_init(void *handle) |
@@ -6120,7 +6155,7 @@ static void gfx_v8_0_ring_emit_hdp_flush(struct amdgpu_ring *ring) | |||
6120 | { | 6155 | { |
6121 | u32 ref_and_mask, reg_mem_engine; | 6156 | u32 ref_and_mask, reg_mem_engine; |
6122 | 6157 | ||
6123 | if (ring->type == AMDGPU_RING_TYPE_COMPUTE) { | 6158 | if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE) { |
6124 | switch (ring->me) { | 6159 | switch (ring->me) { |
6125 | case 1: | 6160 | case 1: |
6126 | ref_and_mask = GPU_HDP_FLUSH_DONE__CP2_MASK << ring->pipe; | 6161 | ref_and_mask = GPU_HDP_FLUSH_DONE__CP2_MASK << ring->pipe; |
@@ -6222,7 +6257,7 @@ static void gfx_v8_0_ring_emit_fence_gfx(struct amdgpu_ring *ring, u64 addr, | |||
6222 | 6257 | ||
6223 | static void gfx_v8_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring) | 6258 | static void gfx_v8_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring) |
6224 | { | 6259 | { |
6225 | int usepfp = (ring->type == AMDGPU_RING_TYPE_GFX); | 6260 | int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX); |
6226 | uint32_t seq = ring->fence_drv.sync_seq; | 6261 | uint32_t seq = ring->fence_drv.sync_seq; |
6227 | uint64_t addr = ring->fence_drv.gpu_addr; | 6262 | uint64_t addr = ring->fence_drv.gpu_addr; |
6228 | 6263 | ||
@@ -6240,11 +6275,7 @@ static void gfx_v8_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring) | |||
6240 | static void gfx_v8_0_ring_emit_vm_flush(struct amdgpu_ring *ring, | 6275 | static void gfx_v8_0_ring_emit_vm_flush(struct amdgpu_ring *ring, |
6241 | unsigned vm_id, uint64_t pd_addr) | 6276 | unsigned vm_id, uint64_t pd_addr) |
6242 | { | 6277 | { |
6243 | int usepfp = (ring->type == AMDGPU_RING_TYPE_GFX); | 6278 | int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX); |
6244 | |||
6245 | /* GFX8 emits 128 dw nop to prevent DE doing vm_flush before CE finishes CEIB */ | ||
6246 | if (usepfp) | ||
6247 | amdgpu_ring_insert_nop(ring, 128); | ||
6248 | 6279 | ||
6249 | amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3)); | 6280 | amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3)); |
6250 | amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(usepfp) | | 6281 | amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(usepfp) | |
@@ -6360,42 +6391,6 @@ static void gfx_v8_ring_emit_cntxcntl(struct amdgpu_ring *ring, uint32_t flags) | |||
6360 | amdgpu_ring_write(ring, 0); | 6391 | amdgpu_ring_write(ring, 0); |
6361 | } | 6392 | } |
6362 | 6393 | ||
6363 | static unsigned gfx_v8_0_ring_get_emit_ib_size_gfx(struct amdgpu_ring *ring) | ||
6364 | { | ||
6365 | return | ||
6366 | 4; /* gfx_v8_0_ring_emit_ib_gfx */ | ||
6367 | } | ||
6368 | |||
6369 | static unsigned gfx_v8_0_ring_get_dma_frame_size_gfx(struct amdgpu_ring *ring) | ||
6370 | { | ||
6371 | return | ||
6372 | 20 + /* gfx_v8_0_ring_emit_gds_switch */ | ||
6373 | 7 + /* gfx_v8_0_ring_emit_hdp_flush */ | ||
6374 | 5 + /* gfx_v8_0_ring_emit_hdp_invalidate */ | ||
6375 | 6 + 6 + 6 +/* gfx_v8_0_ring_emit_fence_gfx x3 for user fence, vm fence */ | ||
6376 | 7 + /* gfx_v8_0_ring_emit_pipeline_sync */ | ||
6377 | 256 + 19 + /* gfx_v8_0_ring_emit_vm_flush */ | ||
6378 | 2 + /* gfx_v8_ring_emit_sb */ | ||
6379 | 3; /* gfx_v8_ring_emit_cntxcntl */ | ||
6380 | } | ||
6381 | |||
6382 | static unsigned gfx_v8_0_ring_get_emit_ib_size_compute(struct amdgpu_ring *ring) | ||
6383 | { | ||
6384 | return | ||
6385 | 4; /* gfx_v8_0_ring_emit_ib_compute */ | ||
6386 | } | ||
6387 | |||
6388 | static unsigned gfx_v8_0_ring_get_dma_frame_size_compute(struct amdgpu_ring *ring) | ||
6389 | { | ||
6390 | return | ||
6391 | 20 + /* gfx_v8_0_ring_emit_gds_switch */ | ||
6392 | 7 + /* gfx_v8_0_ring_emit_hdp_flush */ | ||
6393 | 5 + /* gfx_v8_0_ring_emit_hdp_invalidate */ | ||
6394 | 7 + /* gfx_v8_0_ring_emit_pipeline_sync */ | ||
6395 | 17 + /* gfx_v8_0_ring_emit_vm_flush */ | ||
6396 | 7 + 7 + 7; /* gfx_v8_0_ring_emit_fence_compute x3 for user fence, vm fence */ | ||
6397 | } | ||
6398 | |||
6399 | static void gfx_v8_0_set_gfx_eop_interrupt_state(struct amdgpu_device *adev, | 6394 | static void gfx_v8_0_set_gfx_eop_interrupt_state(struct amdgpu_device *adev, |
6400 | enum amdgpu_interrupt_state state) | 6395 | enum amdgpu_interrupt_state state) |
6401 | { | 6396 | { |
@@ -6541,7 +6536,7 @@ static int gfx_v8_0_priv_inst_irq(struct amdgpu_device *adev, | |||
6541 | return 0; | 6536 | return 0; |
6542 | } | 6537 | } |
6543 | 6538 | ||
6544 | const struct amd_ip_funcs gfx_v8_0_ip_funcs = { | 6539 | static const struct amd_ip_funcs gfx_v8_0_ip_funcs = { |
6545 | .name = "gfx_v8_0", | 6540 | .name = "gfx_v8_0", |
6546 | .early_init = gfx_v8_0_early_init, | 6541 | .early_init = gfx_v8_0_early_init, |
6547 | .late_init = gfx_v8_0_late_init, | 6542 | .late_init = gfx_v8_0_late_init, |
@@ -6562,10 +6557,22 @@ const struct amd_ip_funcs gfx_v8_0_ip_funcs = { | |||
6562 | }; | 6557 | }; |
6563 | 6558 | ||
6564 | static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_gfx = { | 6559 | static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_gfx = { |
6560 | .type = AMDGPU_RING_TYPE_GFX, | ||
6561 | .align_mask = 0xff, | ||
6562 | .nop = PACKET3(PACKET3_NOP, 0x3FFF), | ||
6565 | .get_rptr = gfx_v8_0_ring_get_rptr, | 6563 | .get_rptr = gfx_v8_0_ring_get_rptr, |
6566 | .get_wptr = gfx_v8_0_ring_get_wptr_gfx, | 6564 | .get_wptr = gfx_v8_0_ring_get_wptr_gfx, |
6567 | .set_wptr = gfx_v8_0_ring_set_wptr_gfx, | 6565 | .set_wptr = gfx_v8_0_ring_set_wptr_gfx, |
6568 | .parse_cs = NULL, | 6566 | .emit_frame_size = |
6567 | 20 + /* gfx_v8_0_ring_emit_gds_switch */ | ||
6568 | 7 + /* gfx_v8_0_ring_emit_hdp_flush */ | ||
6569 | 5 + /* gfx_v8_0_ring_emit_hdp_invalidate */ | ||
6570 | 6 + 6 + 6 +/* gfx_v8_0_ring_emit_fence_gfx x3 for user fence, vm fence */ | ||
6571 | 7 + /* gfx_v8_0_ring_emit_pipeline_sync */ | ||
6572 | 128 + 19 + /* gfx_v8_0_ring_emit_vm_flush */ | ||
6573 | 2 + /* gfx_v8_ring_emit_sb */ | ||
6574 | 3, /* gfx_v8_ring_emit_cntxcntl */ | ||
6575 | .emit_ib_size = 4, /* gfx_v8_0_ring_emit_ib_gfx */ | ||
6569 | .emit_ib = gfx_v8_0_ring_emit_ib_gfx, | 6576 | .emit_ib = gfx_v8_0_ring_emit_ib_gfx, |
6570 | .emit_fence = gfx_v8_0_ring_emit_fence_gfx, | 6577 | .emit_fence = gfx_v8_0_ring_emit_fence_gfx, |
6571 | .emit_pipeline_sync = gfx_v8_0_ring_emit_pipeline_sync, | 6578 | .emit_pipeline_sync = gfx_v8_0_ring_emit_pipeline_sync, |
@@ -6579,15 +6586,23 @@ static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_gfx = { | |||
6579 | .pad_ib = amdgpu_ring_generic_pad_ib, | 6586 | .pad_ib = amdgpu_ring_generic_pad_ib, |
6580 | .emit_switch_buffer = gfx_v8_ring_emit_sb, | 6587 | .emit_switch_buffer = gfx_v8_ring_emit_sb, |
6581 | .emit_cntxcntl = gfx_v8_ring_emit_cntxcntl, | 6588 | .emit_cntxcntl = gfx_v8_ring_emit_cntxcntl, |
6582 | .get_emit_ib_size = gfx_v8_0_ring_get_emit_ib_size_gfx, | ||
6583 | .get_dma_frame_size = gfx_v8_0_ring_get_dma_frame_size_gfx, | ||
6584 | }; | 6589 | }; |
6585 | 6590 | ||
6586 | static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_compute = { | 6591 | static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_compute = { |
6592 | .type = AMDGPU_RING_TYPE_COMPUTE, | ||
6593 | .align_mask = 0xff, | ||
6594 | .nop = PACKET3(PACKET3_NOP, 0x3FFF), | ||
6587 | .get_rptr = gfx_v8_0_ring_get_rptr, | 6595 | .get_rptr = gfx_v8_0_ring_get_rptr, |
6588 | .get_wptr = gfx_v8_0_ring_get_wptr_compute, | 6596 | .get_wptr = gfx_v8_0_ring_get_wptr_compute, |
6589 | .set_wptr = gfx_v8_0_ring_set_wptr_compute, | 6597 | .set_wptr = gfx_v8_0_ring_set_wptr_compute, |
6590 | .parse_cs = NULL, | 6598 | .emit_frame_size = |
6599 | 20 + /* gfx_v8_0_ring_emit_gds_switch */ | ||
6600 | 7 + /* gfx_v8_0_ring_emit_hdp_flush */ | ||
6601 | 5 + /* gfx_v8_0_ring_emit_hdp_invalidate */ | ||
6602 | 7 + /* gfx_v8_0_ring_emit_pipeline_sync */ | ||
6603 | 17 + /* gfx_v8_0_ring_emit_vm_flush */ | ||
6604 | 7 + 7 + 7, /* gfx_v8_0_ring_emit_fence_compute x3 for user fence, vm fence */ | ||
6605 | .emit_ib_size = 4, /* gfx_v8_0_ring_emit_ib_compute */ | ||
6591 | .emit_ib = gfx_v8_0_ring_emit_ib_compute, | 6606 | .emit_ib = gfx_v8_0_ring_emit_ib_compute, |
6592 | .emit_fence = gfx_v8_0_ring_emit_fence_compute, | 6607 | .emit_fence = gfx_v8_0_ring_emit_fence_compute, |
6593 | .emit_pipeline_sync = gfx_v8_0_ring_emit_pipeline_sync, | 6608 | .emit_pipeline_sync = gfx_v8_0_ring_emit_pipeline_sync, |
@@ -6599,8 +6614,6 @@ static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_compute = { | |||
6599 | .test_ib = gfx_v8_0_ring_test_ib, | 6614 | .test_ib = gfx_v8_0_ring_test_ib, |
6600 | .insert_nop = amdgpu_ring_insert_nop, | 6615 | .insert_nop = amdgpu_ring_insert_nop, |
6601 | .pad_ib = amdgpu_ring_generic_pad_ib, | 6616 | .pad_ib = amdgpu_ring_generic_pad_ib, |
6602 | .get_emit_ib_size = gfx_v8_0_ring_get_emit_ib_size_compute, | ||
6603 | .get_dma_frame_size = gfx_v8_0_ring_get_dma_frame_size_compute, | ||
6604 | }; | 6617 | }; |
6605 | 6618 | ||
6606 | static void gfx_v8_0_set_ring_funcs(struct amdgpu_device *adev) | 6619 | static void gfx_v8_0_set_ring_funcs(struct amdgpu_device *adev) |
@@ -6753,3 +6766,21 @@ static void gfx_v8_0_get_cu_info(struct amdgpu_device *adev) | |||
6753 | cu_info->number = active_cu_number; | 6766 | cu_info->number = active_cu_number; |
6754 | cu_info->ao_cu_mask = ao_cu_mask; | 6767 | cu_info->ao_cu_mask = ao_cu_mask; |
6755 | } | 6768 | } |
6769 | |||
6770 | const struct amdgpu_ip_block_version gfx_v8_0_ip_block = | ||
6771 | { | ||
6772 | .type = AMD_IP_BLOCK_TYPE_GFX, | ||
6773 | .major = 8, | ||
6774 | .minor = 0, | ||
6775 | .rev = 0, | ||
6776 | .funcs = &gfx_v8_0_ip_funcs, | ||
6777 | }; | ||
6778 | |||
6779 | const struct amdgpu_ip_block_version gfx_v8_1_ip_block = | ||
6780 | { | ||
6781 | .type = AMD_IP_BLOCK_TYPE_GFX, | ||
6782 | .major = 8, | ||
6783 | .minor = 1, | ||
6784 | .rev = 0, | ||
6785 | .funcs = &gfx_v8_0_ip_funcs, | ||
6786 | }; | ||
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.h b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.h index ebed1f829297..788cc3ab584b 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.h +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.h | |||
@@ -24,6 +24,7 @@ | |||
24 | #ifndef __GFX_V8_0_H__ | 24 | #ifndef __GFX_V8_0_H__ |
25 | #define __GFX_V8_0_H__ | 25 | #define __GFX_V8_0_H__ |
26 | 26 | ||
27 | extern const struct amd_ip_funcs gfx_v8_0_ip_funcs; | 27 | extern const struct amdgpu_ip_block_version gfx_v8_0_ip_block; |
28 | extern const struct amdgpu_ip_block_version gfx_v8_1_ip_block; | ||
28 | 29 | ||
29 | #endif | 30 | #endif |
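
The new wave_read_ind()/gfx_v8_0_read_wave_data() helpers in gfx_v8_0.c above use a classic index/data indirect register pair: the wave, SIMD, and register address are packed into mmSQ_IND_INDEX, then the value is read back through mmSQ_IND_DATA. A toy sketch of that access pattern with simulated registers; the bit packing copies the diff, while the register addresses and returned data are fabricated for illustration:

#include <stdint.h>
#include <stdio.h>

/* Simulated index/data pair; bit 13 is a mode bit copied from the diff. */
static uint32_t sq_ind_index;

static void wreg(uint32_t v) { sq_ind_index = v; }
static uint32_t rreg(void)   { return sq_ind_index ^ 0xdeadbeef; /* fake */ }

static uint32_t wave_read_ind(uint32_t simd, uint32_t wave, uint32_t address)
{
	/* wave in [3:0], simd in [5:4], register address in [31:16] */
	wreg((wave & 0xF) | ((simd & 0x3) << 4) | (address << 16) | (1 << 13));
	return rreg();
}

int main(void)
{
	/* read a couple of per-wave words, as read_wave_data does with
	 * ixSQ_WAVE_STATUS, ixSQ_WAVE_PC_LO, etc. (addresses here are
	 * hypothetical) */
	printf("status: 0x%x\n", wave_read_ind(0, 0, 0x12));
	printf("pc_lo:  0x%x\n", wave_read_ind(0, 0, 0x18));
	return 0;
}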
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c index b13c8aaec078..1940d36bc304 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c | |||
@@ -1030,7 +1030,7 @@ static int gmc_v6_0_set_powergating_state(void *handle, | |||
1030 | return 0; | 1030 | return 0; |
1031 | } | 1031 | } |
1032 | 1032 | ||
1033 | const struct amd_ip_funcs gmc_v6_0_ip_funcs = { | 1033 | static const struct amd_ip_funcs gmc_v6_0_ip_funcs = { |
1034 | .name = "gmc_v6_0", | 1034 | .name = "gmc_v6_0", |
1035 | .early_init = gmc_v6_0_early_init, | 1035 | .early_init = gmc_v6_0_early_init, |
1036 | .late_init = gmc_v6_0_late_init, | 1036 | .late_init = gmc_v6_0_late_init, |
@@ -1069,3 +1069,11 @@ static void gmc_v6_0_set_irq_funcs(struct amdgpu_device *adev) | |||
1069 | adev->mc.vm_fault.funcs = &gmc_v6_0_irq_funcs; | 1069 | adev->mc.vm_fault.funcs = &gmc_v6_0_irq_funcs; |
1070 | } | 1070 | } |
1071 | 1071 | ||
1072 | const struct amdgpu_ip_block_version gmc_v6_0_ip_block = | ||
1073 | { | ||
1074 | .type = AMD_IP_BLOCK_TYPE_GMC, | ||
1075 | .major = 6, | ||
1076 | .minor = 0, | ||
1077 | .rev = 0, | ||
1078 | .funcs = &gmc_v6_0_ip_funcs, | ||
1079 | }; | ||
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.h b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.h index 42c4fc676cd4..ed2f64dec47a 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.h +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.h | |||
@@ -24,6 +24,6 @@ | |||
24 | #ifndef __GMC_V6_0_H__ | 24 | #ifndef __GMC_V6_0_H__ |
25 | #define __GMC_V6_0_H__ | 25 | #define __GMC_V6_0_H__ |
26 | 26 | ||
27 | extern const struct amd_ip_funcs gmc_v6_0_ip_funcs; | 27 | extern const struct amdgpu_ip_block_version gmc_v6_0_ip_block; |
28 | 28 | ||
29 | #endif | 29 | #endif |
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c index aa0c4b964621..3a25f72980c1 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c | |||
@@ -1235,7 +1235,7 @@ static int gmc_v7_0_set_powergating_state(void *handle, | |||
1235 | return 0; | 1235 | return 0; |
1236 | } | 1236 | } |
1237 | 1237 | ||
1238 | const struct amd_ip_funcs gmc_v7_0_ip_funcs = { | 1238 | static const struct amd_ip_funcs gmc_v7_0_ip_funcs = { |
1239 | .name = "gmc_v7_0", | 1239 | .name = "gmc_v7_0", |
1240 | .early_init = gmc_v7_0_early_init, | 1240 | .early_init = gmc_v7_0_early_init, |
1241 | .late_init = gmc_v7_0_late_init, | 1241 | .late_init = gmc_v7_0_late_init, |
@@ -1273,3 +1273,21 @@ static void gmc_v7_0_set_irq_funcs(struct amdgpu_device *adev) | |||
1273 | adev->mc.vm_fault.num_types = 1; | 1273 | adev->mc.vm_fault.num_types = 1; |
1274 | adev->mc.vm_fault.funcs = &gmc_v7_0_irq_funcs; | 1274 | adev->mc.vm_fault.funcs = &gmc_v7_0_irq_funcs; |
1275 | } | 1275 | } |
1276 | |||
1277 | const struct amdgpu_ip_block_version gmc_v7_0_ip_block = | ||
1278 | { | ||
1279 | .type = AMD_IP_BLOCK_TYPE_GMC, | ||
1280 | .major = 7, | ||
1281 | .minor = 0, | ||
1282 | .rev = 0, | ||
1283 | .funcs = &gmc_v7_0_ip_funcs, | ||
1284 | }; | ||
1285 | |||
1286 | const struct amdgpu_ip_block_version gmc_v7_4_ip_block = | ||
1287 | { | ||
1288 | .type = AMD_IP_BLOCK_TYPE_GMC, | ||
1289 | .major = 7, | ||
1290 | .minor = 4, | ||
1291 | .rev = 0, | ||
1292 | .funcs = &gmc_v7_0_ip_funcs, | ||
1293 | }; | ||
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.h b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.h index 0b386b5d2f7a..ebce2966c1c7 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.h +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.h | |||
@@ -24,6 +24,7 @@ | |||
24 | #ifndef __GMC_V7_0_H__ | 24 | #ifndef __GMC_V7_0_H__ |
25 | #define __GMC_V7_0_H__ | 25 | #define __GMC_V7_0_H__ |
26 | 26 | ||
27 | extern const struct amd_ip_funcs gmc_v7_0_ip_funcs; | 27 | extern const struct amdgpu_ip_block_version gmc_v7_0_ip_block; |
28 | extern const struct amdgpu_ip_block_version gmc_v7_4_ip_block; | ||
28 | 29 | ||
29 | #endif | 30 | #endif |
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c index c22ef140a542..74d7cc3f7e8c 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c | |||
@@ -1436,7 +1436,7 @@ static int gmc_v8_0_set_powergating_state(void *handle, | |||
1436 | return 0; | 1436 | return 0; |
1437 | } | 1437 | } |
1438 | 1438 | ||
1439 | const struct amd_ip_funcs gmc_v8_0_ip_funcs = { | 1439 | static const struct amd_ip_funcs gmc_v8_0_ip_funcs = { |
1440 | .name = "gmc_v8_0", | 1440 | .name = "gmc_v8_0", |
1441 | .early_init = gmc_v8_0_early_init, | 1441 | .early_init = gmc_v8_0_early_init, |
1442 | .late_init = gmc_v8_0_late_init, | 1442 | .late_init = gmc_v8_0_late_init, |
@@ -1477,3 +1477,30 @@ static void gmc_v8_0_set_irq_funcs(struct amdgpu_device *adev) | |||
1477 | adev->mc.vm_fault.num_types = 1; | 1477 | adev->mc.vm_fault.num_types = 1; |
1478 | adev->mc.vm_fault.funcs = &gmc_v8_0_irq_funcs; | 1478 | adev->mc.vm_fault.funcs = &gmc_v8_0_irq_funcs; |
1479 | } | 1479 | } |
1480 | |||
1481 | const struct amdgpu_ip_block_version gmc_v8_0_ip_block = | ||
1482 | { | ||
1483 | .type = AMD_IP_BLOCK_TYPE_GMC, | ||
1484 | .major = 8, | ||
1485 | .minor = 0, | ||
1486 | .rev = 0, | ||
1487 | .funcs = &gmc_v8_0_ip_funcs, | ||
1488 | }; | ||
1489 | |||
1490 | const struct amdgpu_ip_block_version gmc_v8_1_ip_block = | ||
1491 | { | ||
1492 | .type = AMD_IP_BLOCK_TYPE_GMC, | ||
1493 | .major = 8, | ||
1494 | .minor = 1, | ||
1495 | .rev = 0, | ||
1496 | .funcs = &gmc_v8_0_ip_funcs, | ||
1497 | }; | ||
1498 | |||
1499 | const struct amdgpu_ip_block_version gmc_v8_5_ip_block = | ||
1500 | { | ||
1501 | .type = AMD_IP_BLOCK_TYPE_GMC, | ||
1502 | .major = 8, | ||
1503 | .minor = 5, | ||
1504 | .rev = 0, | ||
1505 | .funcs = &gmc_v8_0_ip_funcs, | ||
1506 | }; | ||
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.h b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.h index fc5001a8119d..19b8a8aed204 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.h +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.h | |||
@@ -24,6 +24,8 @@ | |||
24 | #ifndef __GMC_V8_0_H__ | 24 | #ifndef __GMC_V8_0_H__ |
25 | #define __GMC_V8_0_H__ | 25 | #define __GMC_V8_0_H__ |
26 | 26 | ||
27 | extern const struct amd_ip_funcs gmc_v8_0_ip_funcs; | 27 | extern const struct amdgpu_ip_block_version gmc_v8_0_ip_block; |
28 | extern const struct amdgpu_ip_block_version gmc_v8_1_ip_block; | ||
29 | extern const struct amdgpu_ip_block_version gmc_v8_5_ip_block; | ||
28 | 30 | ||
29 | #endif | 31 | #endif |
diff --git a/drivers/gpu/drm/amd/amdgpu/iceland_ih.c b/drivers/gpu/drm/amd/amdgpu/iceland_ih.c index 3b8906ce3511..ac21bb7bc0f3 100644 --- a/drivers/gpu/drm/amd/amdgpu/iceland_ih.c +++ b/drivers/gpu/drm/amd/amdgpu/iceland_ih.c | |||
@@ -392,7 +392,7 @@ static int iceland_ih_set_powergating_state(void *handle, | |||
392 | return 0; | 392 | return 0; |
393 | } | 393 | } |
394 | 394 | ||
395 | const struct amd_ip_funcs iceland_ih_ip_funcs = { | 395 | static const struct amd_ip_funcs iceland_ih_ip_funcs = { |
396 | .name = "iceland_ih", | 396 | .name = "iceland_ih", |
397 | .early_init = iceland_ih_early_init, | 397 | .early_init = iceland_ih_early_init, |
398 | .late_init = NULL, | 398 | .late_init = NULL, |
@@ -421,3 +421,11 @@ static void iceland_ih_set_interrupt_funcs(struct amdgpu_device *adev) | |||
421 | adev->irq.ih_funcs = &iceland_ih_funcs; | 421 | adev->irq.ih_funcs = &iceland_ih_funcs; |
422 | } | 422 | } |
423 | 423 | ||
424 | const struct amdgpu_ip_block_version iceland_ih_ip_block = | ||
425 | { | ||
426 | .type = AMD_IP_BLOCK_TYPE_IH, | ||
427 | .major = 2, | ||
428 | .minor = 4, | ||
429 | .rev = 0, | ||
430 | .funcs = &iceland_ih_ip_funcs, | ||
431 | }; | ||
diff --git a/drivers/gpu/drm/amd/amdgpu/iceland_ih.h b/drivers/gpu/drm/amd/amdgpu/iceland_ih.h index 57558cddfbcb..3235f4277548 100644 --- a/drivers/gpu/drm/amd/amdgpu/iceland_ih.h +++ b/drivers/gpu/drm/amd/amdgpu/iceland_ih.h | |||
@@ -24,6 +24,6 @@ | |||
24 | #ifndef __ICELAND_IH_H__ | 24 | #ifndef __ICELAND_IH_H__ |
25 | #define __ICELAND_IH_H__ | 25 | #define __ICELAND_IH_H__ |
26 | 26 | ||
27 | extern const struct amd_ip_funcs iceland_ih_ip_funcs; | 27 | extern const struct amdgpu_ip_block_version iceland_ih_ip_block; |
28 | 28 | ||
29 | #endif /* __ICELAND_IH_H__ */ | 29 | #endif /* __ICELAND_IH_H__ */ |
diff --git a/drivers/gpu/drm/amd/amdgpu/kv_dpm.c b/drivers/gpu/drm/amd/amdgpu/kv_dpm.c index f8618a3881a8..b6f2e50636a9 100644 --- a/drivers/gpu/drm/amd/amdgpu/kv_dpm.c +++ b/drivers/gpu/drm/amd/amdgpu/kv_dpm.c | |||
@@ -2796,7 +2796,7 @@ static int kv_parse_power_table(struct amdgpu_device *adev) | |||
2796 | adev->pm.dpm.num_ps = state_array->ucNumEntries; | 2796 | adev->pm.dpm.num_ps = state_array->ucNumEntries; |
2797 | 2797 | ||
2798 | /* fill in the vce power states */ | 2798 | /* fill in the vce power states */ |
2799 | for (i = 0; i < AMDGPU_MAX_VCE_LEVELS; i++) { | 2799 | for (i = 0; i < adev->pm.dpm.num_of_vce_states; i++) { |
2800 | u32 sclk; | 2800 | u32 sclk; |
2801 | clock_array_index = adev->pm.dpm.vce_states[i].clk_idx; | 2801 | clock_array_index = adev->pm.dpm.vce_states[i].clk_idx; |
2802 | clock_info = (union pplib_clock_info *) | 2802 | clock_info = (union pplib_clock_info *) |
@@ -3243,6 +3243,18 @@ static int kv_dpm_set_powergating_state(void *handle, | |||
3243 | return 0; | 3243 | return 0; |
3244 | } | 3244 | } |
3245 | 3245 | ||
3246 | static int kv_check_state_equal(struct amdgpu_device *adev, | ||
3247 | struct amdgpu_ps *cps, | ||
3248 | struct amdgpu_ps *rps, | ||
3249 | bool *equal) | ||
3250 | { | ||
3251 | if (equal == NULL) | ||
3252 | return -EINVAL; | ||
3253 | |||
3254 | *equal = false; | ||
3255 | return 0; | ||
3256 | } | ||
3257 | |||
3246 | const struct amd_ip_funcs kv_dpm_ip_funcs = { | 3258 | const struct amd_ip_funcs kv_dpm_ip_funcs = { |
3247 | .name = "kv_dpm", | 3259 | .name = "kv_dpm", |
3248 | .early_init = kv_dpm_early_init, | 3260 | .early_init = kv_dpm_early_init, |
@@ -3273,6 +3285,8 @@ static const struct amdgpu_dpm_funcs kv_dpm_funcs = { | |||
3273 | .force_performance_level = &kv_dpm_force_performance_level, | 3285 | .force_performance_level = &kv_dpm_force_performance_level, |
3274 | .powergate_uvd = &kv_dpm_powergate_uvd, | 3286 | .powergate_uvd = &kv_dpm_powergate_uvd, |
3275 | .enable_bapm = &kv_dpm_enable_bapm, | 3287 | .enable_bapm = &kv_dpm_enable_bapm, |
3288 | .get_vce_clock_state = amdgpu_get_vce_clock_state, | ||
3289 | .check_state_equal = kv_check_state_equal, | ||
3276 | }; | 3290 | }; |
3277 | 3291 | ||
3278 | static void kv_dpm_set_dpm_funcs(struct amdgpu_device *adev) | 3292 | static void kv_dpm_set_dpm_funcs(struct amdgpu_device *adev) |
@@ -3291,3 +3305,12 @@ static void kv_dpm_set_irq_funcs(struct amdgpu_device *adev) | |||
3291 | adev->pm.dpm.thermal.irq.num_types = AMDGPU_THERMAL_IRQ_LAST; | 3305 | adev->pm.dpm.thermal.irq.num_types = AMDGPU_THERMAL_IRQ_LAST; |
3292 | adev->pm.dpm.thermal.irq.funcs = &kv_dpm_irq_funcs; | 3306 | adev->pm.dpm.thermal.irq.funcs = &kv_dpm_irq_funcs; |
3293 | } | 3307 | } |
3308 | |||
3309 | const struct amdgpu_ip_block_version kv_dpm_ip_block = | ||
3310 | { | ||
3311 | .type = AMD_IP_BLOCK_TYPE_SMC, | ||
3312 | .major = 7, | ||
3313 | .minor = 0, | ||
3314 | .rev = 0, | ||
3315 | .funcs = &kv_dpm_ip_funcs, | ||
3316 | }; | ||
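
kv_check_state_equal() above is a deliberately conservative implementation of the new .check_state_equal DPM callback: it validates the out-parameter, then always reports "not equal" so the core never skips reprogramming a KV power state. A small sketch of that out-parameter idiom, with a hypothetical power-state type standing in for struct amdgpu_ps:

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

struct ps { int sclk; };  /* hypothetical power-state type */

static int check_state_equal(const struct ps *cur, const struct ps *req,
			     bool *equal)
{
	(void)cur;                  /* KV ignores the states entirely */
	(void)req;
	if (equal == NULL)          /* validate the out-parameter first */
		return -EINVAL;

	*equal = false;             /* conservative: always reprogram */
	return 0;
}

int main(void)
{
	struct ps a = { 300 }, b = { 300 };
	bool eq;

	if (check_state_equal(&a, &b, &eq) == 0)
		printf("equal: %s\n", eq ? "yes" : "no");
	return 0;
}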
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c index 565dab3c7218..03e8856b08ce 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c | |||
@@ -232,10 +232,10 @@ static void sdma_v2_4_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count) | |||
232 | 232 | ||
233 | for (i = 0; i < count; i++) | 233 | for (i = 0; i < count; i++) |
234 | if (sdma && sdma->burst_nop && (i == 0)) | 234 | if (sdma && sdma->burst_nop && (i == 0)) |
235 | amdgpu_ring_write(ring, ring->nop | | 235 | amdgpu_ring_write(ring, ring->funcs->nop | |
236 | SDMA_PKT_NOP_HEADER_COUNT(count - 1)); | 236 | SDMA_PKT_NOP_HEADER_COUNT(count - 1)); |
237 | else | 237 | else |
238 | amdgpu_ring_write(ring, ring->nop); | 238 | amdgpu_ring_write(ring, ring->funcs->nop); |
239 | } | 239 | } |
240 | 240 | ||
241 | /** | 241 | /** |
@@ -902,22 +902,6 @@ static void sdma_v2_4_ring_emit_vm_flush(struct amdgpu_ring *ring, | |||
902 | SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(10)); /* retry count, poll interval */ | 902 | SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(10)); /* retry count, poll interval */ |
903 | } | 903 | } |
904 | 904 | ||
905 | static unsigned sdma_v2_4_ring_get_emit_ib_size(struct amdgpu_ring *ring) | ||
906 | { | ||
907 | return | ||
908 | 7 + 6; /* sdma_v2_4_ring_emit_ib */ | ||
909 | } | ||
910 | |||
911 | static unsigned sdma_v2_4_ring_get_dma_frame_size(struct amdgpu_ring *ring) | ||
912 | { | ||
913 | return | ||
914 | 6 + /* sdma_v2_4_ring_emit_hdp_flush */ | ||
915 | 3 + /* sdma_v2_4_ring_emit_hdp_invalidate */ | ||
916 | 6 + /* sdma_v2_4_ring_emit_pipeline_sync */ | ||
917 | 12 + /* sdma_v2_4_ring_emit_vm_flush */ | ||
918 | 10 + 10 + 10; /* sdma_v2_4_ring_emit_fence x3 for user fence, vm fence */ | ||
919 | } | ||
920 | |||
921 | static int sdma_v2_4_early_init(void *handle) | 905 | static int sdma_v2_4_early_init(void *handle) |
922 | { | 906 | { |
923 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; | 907 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; |
@@ -965,11 +949,10 @@ static int sdma_v2_4_sw_init(void *handle) | |||
965 | ring->use_doorbell = false; | 949 | ring->use_doorbell = false; |
966 | sprintf(ring->name, "sdma%d", i); | 950 | sprintf(ring->name, "sdma%d", i); |
967 | r = amdgpu_ring_init(adev, ring, 1024, | 951 | r = amdgpu_ring_init(adev, ring, 1024, |
968 | SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP), 0xf, | ||
969 | &adev->sdma.trap_irq, | 952 | &adev->sdma.trap_irq, |
970 | (i == 0) ? | 953 | (i == 0) ? |
971 | AMDGPU_SDMA_IRQ_TRAP0 : AMDGPU_SDMA_IRQ_TRAP1, | 954 | AMDGPU_SDMA_IRQ_TRAP0 : |
972 | AMDGPU_RING_TYPE_SDMA); | 955 | AMDGPU_SDMA_IRQ_TRAP1); |
973 | if (r) | 956 | if (r) |
974 | return r; | 957 | return r; |
975 | } | 958 | } |
@@ -1204,7 +1187,7 @@ static int sdma_v2_4_set_powergating_state(void *handle, | |||
1204 | return 0; | 1187 | return 0; |
1205 | } | 1188 | } |
1206 | 1189 | ||
1207 | const struct amd_ip_funcs sdma_v2_4_ip_funcs = { | 1190 | static const struct amd_ip_funcs sdma_v2_4_ip_funcs = { |
1208 | .name = "sdma_v2_4", | 1191 | .name = "sdma_v2_4", |
1209 | .early_init = sdma_v2_4_early_init, | 1192 | .early_init = sdma_v2_4_early_init, |
1210 | .late_init = NULL, | 1193 | .late_init = NULL, |
@@ -1222,10 +1205,19 @@ const struct amd_ip_funcs sdma_v2_4_ip_funcs = { | |||
1222 | }; | 1205 | }; |
1223 | 1206 | ||
1224 | static const struct amdgpu_ring_funcs sdma_v2_4_ring_funcs = { | 1207 | static const struct amdgpu_ring_funcs sdma_v2_4_ring_funcs = { |
1208 | .type = AMDGPU_RING_TYPE_SDMA, | ||
1209 | .align_mask = 0xf, | ||
1210 | .nop = SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP), | ||
1225 | .get_rptr = sdma_v2_4_ring_get_rptr, | 1211 | .get_rptr = sdma_v2_4_ring_get_rptr, |
1226 | .get_wptr = sdma_v2_4_ring_get_wptr, | 1212 | .get_wptr = sdma_v2_4_ring_get_wptr, |
1227 | .set_wptr = sdma_v2_4_ring_set_wptr, | 1213 | .set_wptr = sdma_v2_4_ring_set_wptr, |
1228 | .parse_cs = NULL, | 1214 | .emit_frame_size = |
1215 | 6 + /* sdma_v2_4_ring_emit_hdp_flush */ | ||
1216 | 3 + /* sdma_v2_4_ring_emit_hdp_invalidate */ | ||
1217 | 6 + /* sdma_v2_4_ring_emit_pipeline_sync */ | ||
1218 | 12 + /* sdma_v2_4_ring_emit_vm_flush */ | ||
1219 | 10 + 10 + 10, /* sdma_v2_4_ring_emit_fence x3 for user fence, vm fence */ | ||
1220 | .emit_ib_size = 7 + 6, /* sdma_v2_4_ring_emit_ib */ | ||
1229 | .emit_ib = sdma_v2_4_ring_emit_ib, | 1221 | .emit_ib = sdma_v2_4_ring_emit_ib, |
1230 | .emit_fence = sdma_v2_4_ring_emit_fence, | 1222 | .emit_fence = sdma_v2_4_ring_emit_fence, |
1231 | .emit_pipeline_sync = sdma_v2_4_ring_emit_pipeline_sync, | 1223 | .emit_pipeline_sync = sdma_v2_4_ring_emit_pipeline_sync, |
@@ -1236,8 +1228,6 @@ static const struct amdgpu_ring_funcs sdma_v2_4_ring_funcs = { | |||
1236 | .test_ib = sdma_v2_4_ring_test_ib, | 1228 | .test_ib = sdma_v2_4_ring_test_ib, |
1237 | .insert_nop = sdma_v2_4_ring_insert_nop, | 1229 | .insert_nop = sdma_v2_4_ring_insert_nop, |
1238 | .pad_ib = sdma_v2_4_ring_pad_ib, | 1230 | .pad_ib = sdma_v2_4_ring_pad_ib, |
1239 | .get_emit_ib_size = sdma_v2_4_ring_get_emit_ib_size, | ||
1240 | .get_dma_frame_size = sdma_v2_4_ring_get_dma_frame_size, | ||
1241 | }; | 1231 | }; |
1242 | 1232 | ||
1243 | static void sdma_v2_4_set_ring_funcs(struct amdgpu_device *adev) | 1233 | static void sdma_v2_4_set_ring_funcs(struct amdgpu_device *adev) |
@@ -1350,3 +1340,12 @@ static void sdma_v2_4_set_vm_pte_funcs(struct amdgpu_device *adev) | |||
1350 | adev->vm_manager.vm_pte_num_rings = adev->sdma.num_instances; | 1340 | adev->vm_manager.vm_pte_num_rings = adev->sdma.num_instances; |
1351 | } | 1341 | } |
1352 | } | 1342 | } |
1343 | |||
1344 | const struct amdgpu_ip_block_version sdma_v2_4_ip_block = | ||
1345 | { | ||
1346 | .type = AMD_IP_BLOCK_TYPE_SDMA, | ||
1347 | .major = 2, | ||
1348 | .minor = 4, | ||
1349 | .rev = 0, | ||
1350 | .funcs = &sdma_v2_4_ip_funcs, | ||
1351 | }; | ||
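
sdma_v2_4_ring_insert_nop() above is unchanged in substance but now reads the NOP encoding through ring->funcs->nop. The padding scheme it implements: write `count` dwords, and when the engine supports burst NOPs, let the first header carry a count field so the whole run is consumed as one packet rather than `count` single NOPs. A standalone sketch with an illustrative header layout; the real field is SDMA_PKT_NOP_HEADER_COUNT and its bit position may differ:

#include <stdint.h>
#include <stdio.h>

#define NOP_OP       0x00000000u                       /* stand-in opcode */
#define NOP_COUNT(n) (((uint32_t)(n) & 0x3FFF) << 16)  /* illustrative */

static uint32_t ring[64];
static unsigned wptr;

static void ring_write(uint32_t v) { ring[wptr++] = v; }

/* Pad with `count` dwords; with burst support the first NOP carries the
 * count of following padding dwords, so the engine skips the run at once. */
static void insert_nop(unsigned count, int burst_nop)
{
	for (unsigned i = 0; i < count; i++)
		if (burst_nop && i == 0)
			ring_write(NOP_OP | NOP_COUNT(count - 1));
		else
			ring_write(NOP_OP);
}

int main(void)
{
	insert_nop(4, 1);
	printf("first dword: 0x%08x, total: %u\n", ring[0], wptr);
	return 0;
}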
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.h b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.h index 07349f5ee10f..28b433729216 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.h +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.h | |||
@@ -24,6 +24,6 @@ | |||
24 | #ifndef __SDMA_V2_4_H__ | 24 | #ifndef __SDMA_V2_4_H__ |
25 | #define __SDMA_V2_4_H__ | 25 | #define __SDMA_V2_4_H__ |
26 | 26 | ||
27 | extern const struct amd_ip_funcs sdma_v2_4_ip_funcs; | 27 | extern const struct amdgpu_ip_block_version sdma_v2_4_ip_block; |
28 | 28 | ||
29 | #endif | 29 | #endif |
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c index a9d10941fb53..6172d01e985a 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c | |||
@@ -392,10 +392,10 @@ static void sdma_v3_0_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count) | |||
392 | 392 | ||
393 | for (i = 0; i < count; i++) | 393 | for (i = 0; i < count; i++) |
394 | if (sdma && sdma->burst_nop && (i == 0)) | 394 | if (sdma && sdma->burst_nop && (i == 0)) |
395 | amdgpu_ring_write(ring, ring->nop | | 395 | amdgpu_ring_write(ring, ring->funcs->nop | |
396 | SDMA_PKT_NOP_HEADER_COUNT(count - 1)); | 396 | SDMA_PKT_NOP_HEADER_COUNT(count - 1)); |
397 | else | 397 | else |
398 | amdgpu_ring_write(ring, ring->nop); | 398 | amdgpu_ring_write(ring, ring->funcs->nop); |
399 | } | 399 | } |
400 | 400 | ||
401 | /** | 401 | /** |
@@ -1104,22 +1104,6 @@ static void sdma_v3_0_ring_emit_vm_flush(struct amdgpu_ring *ring, | |||
1104 | SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(10)); /* retry count, poll interval */ | 1104 | SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(10)); /* retry count, poll interval */ |
1105 | } | 1105 | } |
1106 | 1106 | ||
1107 | static unsigned sdma_v3_0_ring_get_emit_ib_size(struct amdgpu_ring *ring) | ||
1108 | { | ||
1109 | return | ||
1110 | 7 + 6; /* sdma_v3_0_ring_emit_ib */ | ||
1111 | } | ||
1112 | |||
1113 | static unsigned sdma_v3_0_ring_get_dma_frame_size(struct amdgpu_ring *ring) | ||
1114 | { | ||
1115 | return | ||
1116 | 6 + /* sdma_v3_0_ring_emit_hdp_flush */ | ||
1117 | 3 + /* sdma_v3_0_ring_emit_hdp_invalidate */ | ||
1118 | 6 + /* sdma_v3_0_ring_emit_pipeline_sync */ | ||
1119 | 12 + /* sdma_v3_0_ring_emit_vm_flush */ | ||
1120 | 10 + 10 + 10; /* sdma_v3_0_ring_emit_fence x3 for user fence, vm fence */ | ||
1121 | } | ||
1122 | |||
1123 | static int sdma_v3_0_early_init(void *handle) | 1107 | static int sdma_v3_0_early_init(void *handle) |
1124 | { | 1108 | { |
1125 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; | 1109 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; |
@@ -1177,11 +1161,10 @@ static int sdma_v3_0_sw_init(void *handle) | |||
1177 | 1161 | ||
1178 | sprintf(ring->name, "sdma%d", i); | 1162 | sprintf(ring->name, "sdma%d", i); |
1179 | r = amdgpu_ring_init(adev, ring, 1024, | 1163 | r = amdgpu_ring_init(adev, ring, 1024, |
1180 | SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP), 0xf, | ||
1181 | &adev->sdma.trap_irq, | 1164 | &adev->sdma.trap_irq, |
1182 | (i == 0) ? | 1165 | (i == 0) ? |
1183 | AMDGPU_SDMA_IRQ_TRAP0 : AMDGPU_SDMA_IRQ_TRAP1, | 1166 | AMDGPU_SDMA_IRQ_TRAP0 : |
1184 | AMDGPU_RING_TYPE_SDMA); | 1167 | AMDGPU_SDMA_IRQ_TRAP1); |
1185 | if (r) | 1168 | if (r) |
1186 | return r; | 1169 | return r; |
1187 | } | 1170 | } |
@@ -1544,7 +1527,7 @@ static int sdma_v3_0_set_powergating_state(void *handle, | |||
1544 | return 0; | 1527 | return 0; |
1545 | } | 1528 | } |
1546 | 1529 | ||
1547 | const struct amd_ip_funcs sdma_v3_0_ip_funcs = { | 1530 | static const struct amd_ip_funcs sdma_v3_0_ip_funcs = { |
1548 | .name = "sdma_v3_0", | 1531 | .name = "sdma_v3_0", |
1549 | .early_init = sdma_v3_0_early_init, | 1532 | .early_init = sdma_v3_0_early_init, |
1550 | .late_init = NULL, | 1533 | .late_init = NULL, |
@@ -1565,10 +1548,19 @@ const struct amd_ip_funcs sdma_v3_0_ip_funcs = { | |||
1565 | }; | 1548 | }; |
1566 | 1549 | ||
1567 | static const struct amdgpu_ring_funcs sdma_v3_0_ring_funcs = { | 1550 | static const struct amdgpu_ring_funcs sdma_v3_0_ring_funcs = { |
1551 | .type = AMDGPU_RING_TYPE_SDMA, | ||
1552 | .align_mask = 0xf, | ||
1553 | .nop = SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP), | ||
1568 | .get_rptr = sdma_v3_0_ring_get_rptr, | 1554 | .get_rptr = sdma_v3_0_ring_get_rptr, |
1569 | .get_wptr = sdma_v3_0_ring_get_wptr, | 1555 | .get_wptr = sdma_v3_0_ring_get_wptr, |
1570 | .set_wptr = sdma_v3_0_ring_set_wptr, | 1556 | .set_wptr = sdma_v3_0_ring_set_wptr, |
1571 | .parse_cs = NULL, | 1557 | .emit_frame_size = |
1558 | 6 + /* sdma_v3_0_ring_emit_hdp_flush */ | ||
1559 | 3 + /* sdma_v3_0_ring_emit_hdp_invalidate */ | ||
1560 | 6 + /* sdma_v3_0_ring_emit_pipeline_sync */ | ||
1561 | 12 + /* sdma_v3_0_ring_emit_vm_flush */ | ||
1562 | 10 + 10 + 10, /* sdma_v3_0_ring_emit_fence x3 for user fence, vm fence */ | ||
1563 | .emit_ib_size = 7 + 6, /* sdma_v3_0_ring_emit_ib */ | ||
1572 | .emit_ib = sdma_v3_0_ring_emit_ib, | 1564 | .emit_ib = sdma_v3_0_ring_emit_ib, |
1573 | .emit_fence = sdma_v3_0_ring_emit_fence, | 1565 | .emit_fence = sdma_v3_0_ring_emit_fence, |
1574 | .emit_pipeline_sync = sdma_v3_0_ring_emit_pipeline_sync, | 1566 | .emit_pipeline_sync = sdma_v3_0_ring_emit_pipeline_sync, |
@@ -1579,8 +1571,6 @@ static const struct amdgpu_ring_funcs sdma_v3_0_ring_funcs = { | |||
1579 | .test_ib = sdma_v3_0_ring_test_ib, | 1571 | .test_ib = sdma_v3_0_ring_test_ib, |
1580 | .insert_nop = sdma_v3_0_ring_insert_nop, | 1572 | .insert_nop = sdma_v3_0_ring_insert_nop, |
1581 | .pad_ib = sdma_v3_0_ring_pad_ib, | 1573 | .pad_ib = sdma_v3_0_ring_pad_ib, |
1582 | .get_emit_ib_size = sdma_v3_0_ring_get_emit_ib_size, | ||
1583 | .get_dma_frame_size = sdma_v3_0_ring_get_dma_frame_size, | ||
1584 | }; | 1574 | }; |
1585 | 1575 | ||
1586 | static void sdma_v3_0_set_ring_funcs(struct amdgpu_device *adev) | 1576 | static void sdma_v3_0_set_ring_funcs(struct amdgpu_device *adev) |
@@ -1693,3 +1683,21 @@ static void sdma_v3_0_set_vm_pte_funcs(struct amdgpu_device *adev) | |||
1693 | adev->vm_manager.vm_pte_num_rings = adev->sdma.num_instances; | 1683 | adev->vm_manager.vm_pte_num_rings = adev->sdma.num_instances; |
1694 | } | 1684 | } |
1695 | } | 1685 | } |
1686 | |||
1687 | const struct amdgpu_ip_block_version sdma_v3_0_ip_block = | ||
1688 | { | ||
1689 | .type = AMD_IP_BLOCK_TYPE_SDMA, | ||
1690 | .major = 3, | ||
1691 | .minor = 0, | ||
1692 | .rev = 0, | ||
1693 | .funcs = &sdma_v3_0_ip_funcs, | ||
1694 | }; | ||
1695 | |||
1696 | const struct amdgpu_ip_block_version sdma_v3_1_ip_block = | ||
1697 | { | ||
1698 | .type = AMD_IP_BLOCK_TYPE_SDMA, | ||
1699 | .major = 3, | ||
1700 | .minor = 1, | ||
1701 | .rev = 0, | ||
1702 | .funcs = &sdma_v3_0_ip_funcs, | ||
1703 | }; | ||
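
The emit_frame_size value that replaces get_dma_frame_size() is a worst-case dword budget: everything a submission can emit around its IBs (HDP flush and invalidate, pipeline sync, VM flush, up to three fences). A sketch of how such a budget might be consumed, reserving the worst case before writing anything; this is a simplified model of the accounting, not the actual amdgpu ring-allocation code:

#include <stdio.h>

#define RING_DW 256

static unsigned used;

/* Reserve worst-case space up front; fail instead of wrapping into
 * unconsumed commands. */
static int ring_alloc(unsigned ndw)
{
	if (used + ndw > RING_DW)
		return -1;
	used += ndw;
	return 0;
}

int main(void)
{
	/* the sdma_v3_0 numbers from the table above */
	unsigned emit_frame_size = 6 + 3 + 6 + 12 + 10 + 10 + 10;
	unsigned emit_ib_size = 7 + 6, num_ibs = 2;

	if (ring_alloc(emit_frame_size + emit_ib_size * num_ibs) == 0)
		printf("reserved %u dwords\n", used);
	return 0;
}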
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.h b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.h index 0cb9698a3054..7aa223d35f1b 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.h +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.h | |||
@@ -24,6 +24,7 @@ | |||
24 | #ifndef __SDMA_V3_0_H__ | 24 | #ifndef __SDMA_V3_0_H__ |
25 | #define __SDMA_V3_0_H__ | 25 | #define __SDMA_V3_0_H__ |
26 | 26 | ||
27 | extern const struct amd_ip_funcs sdma_v3_0_ip_funcs; | 27 | extern const struct amdgpu_ip_block_version sdma_v3_0_ip_block; |
28 | extern const struct amdgpu_ip_block_version sdma_v3_1_ip_block; | ||
28 | 29 | ||
29 | #endif | 30 | #endif |
diff --git a/drivers/gpu/drm/amd/amdgpu/si.c b/drivers/gpu/drm/amd/amdgpu/si.c index dc9511c5ecb8..3ed8ad8725b9 100644 --- a/drivers/gpu/drm/amd/amdgpu/si.c +++ b/drivers/gpu/drm/amd/amdgpu/si.c | |||
@@ -39,6 +39,7 @@ | |||
39 | #include "si_dma.h" | 39 | #include "si_dma.h" |
40 | #include "dce_v6_0.h" | 40 | #include "dce_v6_0.h" |
41 | #include "si.h" | 41 | #include "si.h" |
42 | #include "dce_virtual.h" | ||
42 | 43 | ||
43 | static const u32 tahiti_golden_registers[] = | 44 | static const u32 tahiti_golden_registers[] = |
44 | { | 45 | { |
@@ -905,7 +906,7 @@ static void si_pcie_wreg(struct amdgpu_device *adev, u32 reg, u32 v) | |||
905 | spin_unlock_irqrestore(&adev->pcie_idx_lock, flags); | 906 | spin_unlock_irqrestore(&adev->pcie_idx_lock, flags); |
906 | } | 907 | } |
907 | 908 | ||
908 | u32 si_pciep_rreg(struct amdgpu_device *adev, u32 reg) | 909 | static u32 si_pciep_rreg(struct amdgpu_device *adev, u32 reg) |
909 | { | 910 | { |
910 | unsigned long flags; | 911 | unsigned long flags; |
911 | u32 r; | 912 | u32 r; |
@@ -918,7 +919,7 @@ u32 si_pciep_rreg(struct amdgpu_device *adev, u32 reg) | |||
918 | return r; | 919 | return r; |
919 | } | 920 | } |
920 | 921 | ||
921 | void si_pciep_wreg(struct amdgpu_device *adev, u32 reg, u32 v) | 922 | static void si_pciep_wreg(struct amdgpu_device *adev, u32 reg, u32 v) |
922 | { | 923 | { |
923 | unsigned long flags; | 924 | unsigned long flags; |
924 | 925 | ||
@@ -1811,7 +1812,7 @@ static int si_common_set_powergating_state(void *handle, | |||
1811 | return 0; | 1812 | return 0; |
1812 | } | 1813 | } |
1813 | 1814 | ||
1814 | const struct amd_ip_funcs si_common_ip_funcs = { | 1815 | static const struct amd_ip_funcs si_common_ip_funcs = { |
1815 | .name = "si_common", | 1816 | .name = "si_common", |
1816 | .early_init = si_common_early_init, | 1817 | .early_init = si_common_early_init, |
1817 | .late_init = NULL, | 1818 | .late_init = NULL, |
@@ -1828,119 +1829,13 @@ const struct amd_ip_funcs si_common_ip_funcs = { | |||
1828 | .set_powergating_state = si_common_set_powergating_state, | 1829 | .set_powergating_state = si_common_set_powergating_state, |
1829 | }; | 1830 | }; |
1830 | 1831 | ||
1831 | static const struct amdgpu_ip_block_version verde_ip_blocks[] = | 1832 | static const struct amdgpu_ip_block_version si_common_ip_block = |
1832 | { | 1833 | { |
1833 | { | 1834 | .type = AMD_IP_BLOCK_TYPE_COMMON, |
1834 | .type = AMD_IP_BLOCK_TYPE_COMMON, | 1835 | .major = 1, |
1835 | .major = 1, | 1836 | .minor = 0, |
1836 | .minor = 0, | 1837 | .rev = 0, |
1837 | .rev = 0, | 1838 | .funcs = &si_common_ip_funcs, |
1838 | .funcs = &si_common_ip_funcs, | ||
1839 | }, | ||
1840 | { | ||
1841 | .type = AMD_IP_BLOCK_TYPE_GMC, | ||
1842 | .major = 6, | ||
1843 | .minor = 0, | ||
1844 | .rev = 0, | ||
1845 | .funcs = &gmc_v6_0_ip_funcs, | ||
1846 | }, | ||
1847 | { | ||
1848 | .type = AMD_IP_BLOCK_TYPE_IH, | ||
1849 | .major = 1, | ||
1850 | .minor = 0, | ||
1851 | .rev = 0, | ||
1852 | .funcs = &si_ih_ip_funcs, | ||
1853 | }, | ||
1854 | { | ||
1855 | .type = AMD_IP_BLOCK_TYPE_SMC, | ||
1856 | .major = 6, | ||
1857 | .minor = 0, | ||
1858 | .rev = 0, | ||
1859 | .funcs = &amdgpu_pp_ip_funcs, | ||
1860 | }, | ||
1861 | { | ||
1862 | .type = AMD_IP_BLOCK_TYPE_DCE, | ||
1863 | .major = 6, | ||
1864 | .minor = 0, | ||
1865 | .rev = 0, | ||
1866 | .funcs = &dce_v6_0_ip_funcs, | ||
1867 | }, | ||
1868 | { | ||
1869 | .type = AMD_IP_BLOCK_TYPE_GFX, | ||
1870 | .major = 6, | ||
1871 | .minor = 0, | ||
1872 | .rev = 0, | ||
1873 | .funcs = &gfx_v6_0_ip_funcs, | ||
1874 | }, | ||
1875 | { | ||
1876 | .type = AMD_IP_BLOCK_TYPE_SDMA, | ||
1877 | .major = 1, | ||
1878 | .minor = 0, | ||
1879 | .rev = 0, | ||
1880 | .funcs = &si_dma_ip_funcs, | ||
1881 | }, | ||
1882 | /* { | ||
1883 | .type = AMD_IP_BLOCK_TYPE_UVD, | ||
1884 | .major = 3, | ||
1885 | .minor = 1, | ||
1886 | .rev = 0, | ||
1887 | .funcs = &si_null_ip_funcs, | ||
1888 | }, | ||
1889 | { | ||
1890 | .type = AMD_IP_BLOCK_TYPE_VCE, | ||
1891 | .major = 1, | ||
1892 | .minor = 0, | ||
1893 | .rev = 0, | ||
1894 | .funcs = &si_null_ip_funcs, | ||
1895 | }, | ||
1896 | */ | ||
1897 | }; | ||
1898 | |||
1899 | |||
1900 | static const struct amdgpu_ip_block_version hainan_ip_blocks[] = | ||
1901 | { | ||
1902 | { | ||
1903 | .type = AMD_IP_BLOCK_TYPE_COMMON, | ||
1904 | .major = 1, | ||
1905 | .minor = 0, | ||
1906 | .rev = 0, | ||
1907 | .funcs = &si_common_ip_funcs, | ||
1908 | }, | ||
1909 | { | ||
1910 | .type = AMD_IP_BLOCK_TYPE_GMC, | ||
1911 | .major = 6, | ||
1912 | .minor = 0, | ||
1913 | .rev = 0, | ||
1914 | .funcs = &gmc_v6_0_ip_funcs, | ||
1915 | }, | ||
1916 | { | ||
1917 | .type = AMD_IP_BLOCK_TYPE_IH, | ||
1918 | .major = 1, | ||
1919 | .minor = 0, | ||
1920 | .rev = 0, | ||
1921 | .funcs = &si_ih_ip_funcs, | ||
1922 | }, | ||
1923 | { | ||
1924 | .type = AMD_IP_BLOCK_TYPE_SMC, | ||
1925 | .major = 6, | ||
1926 | .minor = 0, | ||
1927 | .rev = 0, | ||
1928 | .funcs = &amdgpu_pp_ip_funcs, | ||
1929 | }, | ||
1930 | { | ||
1931 | .type = AMD_IP_BLOCK_TYPE_GFX, | ||
1932 | .major = 6, | ||
1933 | .minor = 0, | ||
1934 | .rev = 0, | ||
1935 | .funcs = &gfx_v6_0_ip_funcs, | ||
1936 | }, | ||
1937 | { | ||
1938 | .type = AMD_IP_BLOCK_TYPE_SDMA, | ||
1939 | .major = 1, | ||
1940 | .minor = 0, | ||
1941 | .rev = 0, | ||
1942 | .funcs = &si_dma_ip_funcs, | ||
1943 | }, | ||
1944 | }; | 1839 | }; |
1945 | 1840 | ||
1946 | int si_set_ip_blocks(struct amdgpu_device *adev) | 1841 | int si_set_ip_blocks(struct amdgpu_device *adev) |
@@ -1949,13 +1844,42 @@ int si_set_ip_blocks(struct amdgpu_device *adev) | |||
1949 | case CHIP_VERDE: | 1844 | case CHIP_VERDE: |
1950 | case CHIP_TAHITI: | 1845 | case CHIP_TAHITI: |
1951 | case CHIP_PITCAIRN: | 1846 | case CHIP_PITCAIRN: |
1847 | amdgpu_ip_block_add(adev, &si_common_ip_block); | ||
1848 | amdgpu_ip_block_add(adev, &gmc_v6_0_ip_block); | ||
1849 | amdgpu_ip_block_add(adev, &si_ih_ip_block); | ||
1850 | amdgpu_ip_block_add(adev, &amdgpu_pp_ip_block); | ||
1851 | if (adev->enable_virtual_display) | ||
1852 | amdgpu_ip_block_add(adev, &dce_virtual_ip_block); | ||
1853 | else | ||
1854 | amdgpu_ip_block_add(adev, &dce_v6_0_ip_block); | ||
1855 | amdgpu_ip_block_add(adev, &gfx_v6_0_ip_block); | ||
1856 | amdgpu_ip_block_add(adev, &si_dma_ip_block); | ||
1857 | /* amdgpu_ip_block_add(adev, &uvd_v3_1_ip_block); */ | ||
1858 | /* amdgpu_ip_block_add(adev, &vce_v1_0_ip_block); */ | ||
1859 | break; | ||
1952 | case CHIP_OLAND: | 1860 | case CHIP_OLAND: |
1953 | adev->ip_blocks = verde_ip_blocks; | 1861 | amdgpu_ip_block_add(adev, &si_common_ip_block); |
1954 | adev->num_ip_blocks = ARRAY_SIZE(verde_ip_blocks); | 1862 | amdgpu_ip_block_add(adev, &gmc_v6_0_ip_block); |
1863 | amdgpu_ip_block_add(adev, &si_ih_ip_block); | ||
1864 | amdgpu_ip_block_add(adev, &amdgpu_pp_ip_block); | ||
1865 | if (adev->enable_virtual_display) | ||
1866 | amdgpu_ip_block_add(adev, &dce_virtual_ip_block); | ||
1867 | else | ||
1868 | amdgpu_ip_block_add(adev, &dce_v6_4_ip_block); | ||
1869 | amdgpu_ip_block_add(adev, &gfx_v6_0_ip_block); | ||
1870 | amdgpu_ip_block_add(adev, &si_dma_ip_block); | ||
1871 | /* amdgpu_ip_block_add(adev, &uvd_v3_1_ip_block); */ | ||
1872 | /* amdgpu_ip_block_add(adev, &vce_v1_0_ip_block); */ | ||
1955 | break; | 1873 | break; |
1956 | case CHIP_HAINAN: | 1874 | case CHIP_HAINAN: |
1957 | adev->ip_blocks = hainan_ip_blocks; | 1875 | amdgpu_ip_block_add(adev, &si_common_ip_block); |
1958 | adev->num_ip_blocks = ARRAY_SIZE(hainan_ip_blocks); | 1876 | amdgpu_ip_block_add(adev, &gmc_v6_0_ip_block); |
1877 | amdgpu_ip_block_add(adev, &si_ih_ip_block); | ||
1878 | amdgpu_ip_block_add(adev, &amdgpu_pp_ip_block); | ||
1879 | if (adev->enable_virtual_display) | ||
1880 | amdgpu_ip_block_add(adev, &dce_virtual_ip_block); | ||
1881 | amdgpu_ip_block_add(adev, &gfx_v6_0_ip_block); | ||
1882 | amdgpu_ip_block_add(adev, &si_dma_ip_block); | ||
1959 | break; | 1883 | break; |
1960 | default: | 1884 | default: |
1961 | BUG(); | 1885 | BUG(); |
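
The hunk above drops the per-ASIC ip_blocks arrays in favour of imperative amdgpu_ip_block_add() calls, so a runtime decision such as enable_virtual_display can swap dce_virtual in for the hardware DCE block without maintaining a second array per chip. A minimal stand-alone sketch of the pattern; the struct and helper names below are illustrative, not the driver's real ones:

    #include <assert.h>
    #include <stdbool.h>

    #define MAX_BLOCKS 16

    struct ip_block_version { const char *name; int major, minor; };

    struct device {
        bool enable_virtual_display;
        int num_ip_blocks;
        const struct ip_block_version *ip_blocks[MAX_BLOCKS];
    };

    /* Append one block; stands in for what amdgpu_ip_block_add() is for. */
    static void ip_block_add(struct device *dev,
                             const struct ip_block_version *v)
    {
        assert(dev->num_ip_blocks < MAX_BLOCKS);
        dev->ip_blocks[dev->num_ip_blocks++] = v;
    }

    static const struct ip_block_version dce_hw   = { "dce_v6_0",    6, 0 };
    static const struct ip_block_version dce_virt = { "dce_virtual", 1, 0 };

    static void set_ip_blocks(struct device *dev)
    {
        /* One conditional replaces two nearly identical static arrays. */
        ip_block_add(dev, dev->enable_virtual_display ? &dce_virt : &dce_hw);
    }
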
diff --git a/drivers/gpu/drm/amd/amdgpu/si.h b/drivers/gpu/drm/amd/amdgpu/si.h index 959d7b63e0e5..589225080c24 100644 --- a/drivers/gpu/drm/amd/amdgpu/si.h +++ b/drivers/gpu/drm/amd/amdgpu/si.h | |||
@@ -24,8 +24,6 @@ | |||
24 | #ifndef __SI_H__ | 24 | #ifndef __SI_H__ |
25 | #define __SI_H__ | 25 | #define __SI_H__ |
26 | 26 | ||
27 | extern const struct amd_ip_funcs si_common_ip_funcs; | ||
28 | |||
29 | void si_srbm_select(struct amdgpu_device *adev, | 27 | void si_srbm_select(struct amdgpu_device *adev, |
30 | u32 me, u32 pipe, u32 queue, u32 vmid); | 28 | u32 me, u32 pipe, u32 queue, u32 vmid); |
31 | int si_set_ip_blocks(struct amdgpu_device *adev); | 29 | int si_set_ip_blocks(struct amdgpu_device *adev); |
diff --git a/drivers/gpu/drm/amd/amdgpu/si_dma.c b/drivers/gpu/drm/amd/amdgpu/si_dma.c index de358193a8f9..14265c5c349e 100644 --- a/drivers/gpu/drm/amd/amdgpu/si_dma.c +++ b/drivers/gpu/drm/amd/amdgpu/si_dma.c | |||
@@ -495,22 +495,6 @@ static void si_dma_ring_emit_vm_flush(struct amdgpu_ring *ring, | |||
495 | amdgpu_ring_write(ring, (0 << 28) | 0x20); /* func(always) | poll interval */ | 495 | amdgpu_ring_write(ring, (0 << 28) | 0x20); /* func(always) | poll interval */ |
496 | } | 496 | } |
497 | 497 | ||
498 | static unsigned si_dma_ring_get_emit_ib_size(struct amdgpu_ring *ring) | ||
499 | { | ||
500 | return | ||
501 | 7 + 3; /* si_dma_ring_emit_ib */ | ||
502 | } | ||
503 | |||
504 | static unsigned si_dma_ring_get_dma_frame_size(struct amdgpu_ring *ring) | ||
505 | { | ||
506 | return | ||
507 | 3 + /* si_dma_ring_emit_hdp_flush */ | ||
508 | 3 + /* si_dma_ring_emit_hdp_invalidate */ | ||
509 | 6 + /* si_dma_ring_emit_pipeline_sync */ | ||
510 | 12 + /* si_dma_ring_emit_vm_flush */ | ||
511 | 9 + 9 + 9; /* si_dma_ring_emit_fence x3 for user fence, vm fence */ | ||
512 | } | ||
513 | |||
514 | static int si_dma_early_init(void *handle) | 498 | static int si_dma_early_init(void *handle) |
515 | { | 499 | { |
516 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; | 500 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; |
@@ -547,11 +531,10 @@ static int si_dma_sw_init(void *handle) | |||
547 | ring->use_doorbell = false; | 531 | ring->use_doorbell = false; |
548 | sprintf(ring->name, "sdma%d", i); | 532 | sprintf(ring->name, "sdma%d", i); |
549 | r = amdgpu_ring_init(adev, ring, 1024, | 533 | r = amdgpu_ring_init(adev, ring, 1024, |
550 | DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0, 0), 0xf, | ||
551 | &adev->sdma.trap_irq, | 534 | &adev->sdma.trap_irq, |
552 | (i == 0) ? | 535 | (i == 0) ? |
553 | AMDGPU_SDMA_IRQ_TRAP0 : AMDGPU_SDMA_IRQ_TRAP1, | 536 | AMDGPU_SDMA_IRQ_TRAP0 : |
554 | AMDGPU_RING_TYPE_SDMA); | 537 | AMDGPU_SDMA_IRQ_TRAP1); |
555 | if (r) | 538 | if (r) |
556 | return r; | 539 | return r; |
557 | } | 540 | } |
@@ -762,7 +745,7 @@ static int si_dma_set_powergating_state(void *handle, | |||
762 | return 0; | 745 | return 0; |
763 | } | 746 | } |
764 | 747 | ||
765 | const struct amd_ip_funcs si_dma_ip_funcs = { | 748 | static const struct amd_ip_funcs si_dma_ip_funcs = { |
766 | .name = "si_dma", | 749 | .name = "si_dma", |
767 | .early_init = si_dma_early_init, | 750 | .early_init = si_dma_early_init, |
768 | .late_init = NULL, | 751 | .late_init = NULL, |
@@ -780,10 +763,19 @@ const struct amd_ip_funcs si_dma_ip_funcs = { | |||
780 | }; | 763 | }; |
781 | 764 | ||
782 | static const struct amdgpu_ring_funcs si_dma_ring_funcs = { | 765 | static const struct amdgpu_ring_funcs si_dma_ring_funcs = { |
766 | .type = AMDGPU_RING_TYPE_SDMA, | ||
767 | .align_mask = 0xf, | ||
768 | .nop = DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0, 0), | ||
783 | .get_rptr = si_dma_ring_get_rptr, | 769 | .get_rptr = si_dma_ring_get_rptr, |
784 | .get_wptr = si_dma_ring_get_wptr, | 770 | .get_wptr = si_dma_ring_get_wptr, |
785 | .set_wptr = si_dma_ring_set_wptr, | 771 | .set_wptr = si_dma_ring_set_wptr, |
786 | .parse_cs = NULL, | 772 | .emit_frame_size = |
773 | 3 + /* si_dma_ring_emit_hdp_flush */ | ||
774 | 3 + /* si_dma_ring_emit_hdp_invalidate */ | ||
775 | 6 + /* si_dma_ring_emit_pipeline_sync */ | ||
776 | 12 + /* si_dma_ring_emit_vm_flush */ | ||
777 | 9 + 9 + 9, /* si_dma_ring_emit_fence x3 for user fence, vm fence */ | ||
778 | .emit_ib_size = 7 + 3, /* si_dma_ring_emit_ib */ | ||
787 | .emit_ib = si_dma_ring_emit_ib, | 779 | .emit_ib = si_dma_ring_emit_ib, |
788 | .emit_fence = si_dma_ring_emit_fence, | 780 | .emit_fence = si_dma_ring_emit_fence, |
789 | .emit_pipeline_sync = si_dma_ring_emit_pipeline_sync, | 781 | .emit_pipeline_sync = si_dma_ring_emit_pipeline_sync, |
@@ -794,8 +786,6 @@ static const struct amdgpu_ring_funcs si_dma_ring_funcs = { | |||
794 | .test_ib = si_dma_ring_test_ib, | 786 | .test_ib = si_dma_ring_test_ib, |
795 | .insert_nop = amdgpu_ring_insert_nop, | 787 | .insert_nop = amdgpu_ring_insert_nop, |
796 | .pad_ib = si_dma_ring_pad_ib, | 788 | .pad_ib = si_dma_ring_pad_ib, |
797 | .get_emit_ib_size = si_dma_ring_get_emit_ib_size, | ||
798 | .get_dma_frame_size = si_dma_ring_get_dma_frame_size, | ||
799 | }; | 789 | }; |
800 | 790 | ||
801 | static void si_dma_set_ring_funcs(struct amdgpu_device *adev) | 791 | static void si_dma_set_ring_funcs(struct amdgpu_device *adev) |
@@ -913,3 +903,12 @@ static void si_dma_set_vm_pte_funcs(struct amdgpu_device *adev) | |||
913 | adev->vm_manager.vm_pte_num_rings = adev->sdma.num_instances; | 903 | adev->vm_manager.vm_pte_num_rings = adev->sdma.num_instances; |
914 | } | 904 | } |
915 | } | 905 | } |
906 | |||
907 | const struct amdgpu_ip_block_version si_dma_ip_block = | ||
908 | { | ||
909 | .type = AMD_IP_BLOCK_TYPE_SDMA, | ||
910 | .major = 1, | ||
911 | .minor = 0, | ||
912 | .rev = 0, | ||
913 | .funcs = &si_dma_ip_funcs, | ||
914 | }; | ||
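
With get_emit_ib_size()/get_dma_frame_size() gone, the worst-case dword counts live directly in the const amdgpu_ring_funcs table as emit_frame_size and emit_ib_size. A caller can then size a submission with plain arithmetic instead of two indirect calls; a sketch, where ring_alloc() is an assumed helper and not the real amdgpu API:

    /* Hypothetical consumer of the new constant fields. */
    struct ring_funcs {
        unsigned emit_frame_size;   /* fixed per-frame overhead, in dwords */
        unsigned emit_ib_size;      /* dwords per indirect buffer */
    };

    extern int ring_alloc(unsigned ndw);    /* assumed allocation helper */

    static int reserve_space(const struct ring_funcs *funcs,
                             unsigned num_ibs)
    {
        /* Worst case: the frame overhead plus one IB packet per IB. */
        unsigned ndw = funcs->emit_frame_size +
                       funcs->emit_ib_size * num_ibs;
        return ring_alloc(ndw);
    }
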
diff --git a/drivers/gpu/drm/amd/amdgpu/si_dma.h b/drivers/gpu/drm/amd/amdgpu/si_dma.h index 3a3e0c78a54b..5ac1b8452fb4 100644 --- a/drivers/gpu/drm/amd/amdgpu/si_dma.h +++ b/drivers/gpu/drm/amd/amdgpu/si_dma.h | |||
@@ -24,6 +24,6 @@ | |||
24 | #ifndef __SI_DMA_H__ | 24 | #ifndef __SI_DMA_H__ |
25 | #define __SI_DMA_H__ | 25 | #define __SI_DMA_H__ |
26 | 26 | ||
27 | extern const struct amd_ip_funcs si_dma_ip_funcs; | 27 | extern const struct amdgpu_ip_block_version si_dma_ip_block; |
28 | 28 | ||
29 | #endif | 29 | #endif |
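
The header now exposes a single amdgpu_ip_block_version instead of the raw amd_ip_funcs table, which lets the funcs table become static and keeps the version numbers next to the callbacks they describe. The shape of the pattern, compressed into one illustrative file with toy types:

    /* foo.h would carry only:
     *     extern const struct ip_block_version foo_ip_block;
     */
    struct ip_funcs { int (*hw_init)(void *handle); };

    struct ip_block_version {
        int major, minor, rev;
        const struct ip_funcs *funcs;
    };

    static int foo_hw_init(void *handle) { (void)handle; return 0; }

    /* Private now; nothing outside this file can reach the callbacks. */
    static const struct ip_funcs foo_funcs = { .hw_init = foo_hw_init };

    /* The only symbol other files need to see. */
    const struct ip_block_version foo_ip_block = {
        .major = 1, .minor = 0, .rev = 0, .funcs = &foo_funcs,
    };
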
diff --git a/drivers/gpu/drm/amd/amdgpu/si_dpm.c b/drivers/gpu/drm/amd/amdgpu/si_dpm.c index 3de7bca5854b..917213396787 100644 --- a/drivers/gpu/drm/amd/amdgpu/si_dpm.c +++ b/drivers/gpu/drm/amd/amdgpu/si_dpm.c | |||
@@ -3171,6 +3171,7 @@ static void ni_update_current_ps(struct amdgpu_device *adev, | |||
3171 | eg_pi->current_rps = *rps; | 3171 | eg_pi->current_rps = *rps; |
3172 | ni_pi->current_ps = *new_ps; | 3172 | ni_pi->current_ps = *new_ps; |
3173 | eg_pi->current_rps.ps_priv = &ni_pi->current_ps; | 3173 | eg_pi->current_rps.ps_priv = &ni_pi->current_ps; |
3174 | adev->pm.dpm.current_ps = &eg_pi->current_rps; | ||
3174 | } | 3175 | } |
3175 | 3176 | ||
3176 | static void ni_update_requested_ps(struct amdgpu_device *adev, | 3177 | static void ni_update_requested_ps(struct amdgpu_device *adev, |
@@ -3183,6 +3184,7 @@ static void ni_update_requested_ps(struct amdgpu_device *adev, | |||
3183 | eg_pi->requested_rps = *rps; | 3184 | eg_pi->requested_rps = *rps; |
3184 | ni_pi->requested_ps = *new_ps; | 3185 | ni_pi->requested_ps = *new_ps; |
3185 | eg_pi->requested_rps.ps_priv = &ni_pi->requested_ps; | 3186 | eg_pi->requested_rps.ps_priv = &ni_pi->requested_ps; |
3187 | adev->pm.dpm.requested_ps = &eg_pi->requested_rps; | ||
3186 | } | 3188 | } |
3187 | 3189 | ||
3188 | static void ni_set_uvd_clock_before_set_eng_clock(struct amdgpu_device *adev, | 3190 | static void ni_set_uvd_clock_before_set_eng_clock(struct amdgpu_device *adev, |
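
Recording the embedded rps copies in adev->pm.dpm.current_ps / requested_ps gives code outside si_dpm.c a stable handle on both states; the check_state_equal path added further down is the obvious consumer. The underlying idiom, with toy types, is snapshot-and-point: copy the caller's state into driver-private storage and publish a pointer to the copy, so the handle outlives the caller's buffer:

    /* Snapshot-and-point pattern: generic code gets a stable handle. */
    struct state { int sclk, mclk; };
    struct hw_priv { struct state current_copy; };

    static const struct state *record_current(struct hw_priv *p,
                                              const struct state *s)
    {
        p->current_copy = *s;      /* copy survives the caller's buffer */
        return &p->current_copy;   /* what pm.dpm.current_ps mirrors */
    }
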
@@ -7320,7 +7322,7 @@ static int si_parse_power_table(struct amdgpu_device *adev) | |||
7320 | adev->pm.dpm.num_ps = state_array->ucNumEntries; | 7322 | adev->pm.dpm.num_ps = state_array->ucNumEntries; |
7321 | 7323 | ||
7322 | /* fill in the vce power states */ | 7324 | /* fill in the vce power states */ |
7323 | for (i = 0; i < AMDGPU_MAX_VCE_LEVELS; i++) { | 7325 | for (i = 0; i < adev->pm.dpm.num_of_vce_states; i++) { |
7324 | u32 sclk, mclk; | 7326 | u32 sclk, mclk; |
7325 | clock_array_index = adev->pm.dpm.vce_states[i].clk_idx; | 7327 | clock_array_index = adev->pm.dpm.vce_states[i].clk_idx; |
7326 | clock_info = (union pplib_clock_info *) | 7328 | clock_info = (union pplib_clock_info *) |
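
Bounding the VCE fill loop by num_of_vce_states rather than AMDGPU_MAX_VCE_LEVELS means only the entries the platform actually reported get dereferenced; the tail of the fixed-size array stays untouched. The general shape of the fix, as a stand-alone sketch:

    #define MAX_LEVELS 6                    /* compile-time capacity */

    struct table {
        int states[MAX_LEVELS];
        unsigned num_states;                /* entries actually populated */
    };

    static int sum_states(const struct table *t)
    {
        int total = 0;
        /* Iterate the populated count, never the raw capacity. */
        for (unsigned i = 0; i < t->num_states; i++)
            total += t->states[i];
        return total;
    }
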
@@ -7957,6 +7959,57 @@ static int si_dpm_early_init(void *handle) | |||
7957 | return 0; | 7959 | return 0; |
7958 | } | 7960 | } |
7959 | 7961 | ||
7962 | static inline bool si_are_power_levels_equal(const struct rv7xx_pl *si_cpl1, | ||
7963 | const struct rv7xx_pl *si_cpl2) | ||
7964 | { | ||
7965 | return ((si_cpl1->mclk == si_cpl2->mclk) && | ||
7966 | (si_cpl1->sclk == si_cpl2->sclk) && | ||
7967 | (si_cpl1->pcie_gen == si_cpl2->pcie_gen) && | ||
7968 | (si_cpl1->vddc == si_cpl2->vddc) && | ||
7969 | (si_cpl1->vddci == si_cpl2->vddci)); | ||
7970 | } | ||
7971 | |||
7972 | static int si_check_state_equal(struct amdgpu_device *adev, | ||
7973 | struct amdgpu_ps *cps, | ||
7974 | struct amdgpu_ps *rps, | ||
7975 | bool *equal) | ||
7976 | { | ||
7977 | struct si_ps *si_cps; | ||
7978 | struct si_ps *si_rps; | ||
7979 | int i; | ||
7980 | |||
7981 | if (adev == NULL || cps == NULL || rps == NULL || equal == NULL) | ||
7982 | return -EINVAL; | ||
7983 | |||
7984 | si_cps = si_get_ps(cps); | ||
7985 | si_rps = si_get_ps(rps); | ||
7986 | |||
7987 | if (si_cps == NULL) { | ||
7988 | printk("si_cps is NULL\n"); | ||
7989 | *equal = false; | ||
7990 | return 0; | ||
7991 | } | ||
7992 | |||
7993 | if (si_cps->performance_level_count != si_rps->performance_level_count) { | ||
7994 | *equal = false; | ||
7995 | return 0; | ||
7996 | } | ||
7997 | |||
7998 | for (i = 0; i < si_cps->performance_level_count; i++) { | ||
7999 | if (!si_are_power_levels_equal(&(si_cps->performance_levels[i]), | ||
8000 | &(si_rps->performance_levels[i]))) { | ||
8001 | *equal = false; | ||
8002 | return 0; | ||
8003 | } | ||
8004 | } | ||
8005 | |||
8006 | /* If all performance levels are the same, try to use the UVD clocks to break the tie. */ | ||
8007 | *equal = ((cps->vclk == rps->vclk) && (cps->dclk == rps->dclk)); | ||
8008 | *equal &= ((cps->evclk == rps->evclk) && (cps->ecclk == rps->ecclk)); | ||
8009 | |||
8010 | return 0; | ||
8011 | } | ||
8012 | |||
7960 | 8013 | ||
7961 | const struct amd_ip_funcs si_dpm_ip_funcs = { | 8014 | const struct amd_ip_funcs si_dpm_ip_funcs = { |
7962 | .name = "si_dpm", | 8015 | .name = "si_dpm", |
@@ -7991,6 +8044,8 @@ static const struct amdgpu_dpm_funcs si_dpm_funcs = { | |||
7991 | .get_fan_control_mode = &si_dpm_get_fan_control_mode, | 8044 | .get_fan_control_mode = &si_dpm_get_fan_control_mode, |
7992 | .set_fan_speed_percent = &si_dpm_set_fan_speed_percent, | 8045 | .set_fan_speed_percent = &si_dpm_set_fan_speed_percent, |
7993 | .get_fan_speed_percent = &si_dpm_get_fan_speed_percent, | 8046 | .get_fan_speed_percent = &si_dpm_get_fan_speed_percent, |
8047 | .check_state_equal = &si_check_state_equal, | ||
8048 | .get_vce_clock_state = amdgpu_get_vce_clock_state, | ||
7994 | }; | 8049 | }; |
7995 | 8050 | ||
7996 | static void si_dpm_set_dpm_funcs(struct amdgpu_device *adev) | 8051 | static void si_dpm_set_dpm_funcs(struct amdgpu_device *adev) |
@@ -8010,3 +8065,11 @@ static void si_dpm_set_irq_funcs(struct amdgpu_device *adev) | |||
8010 | adev->pm.dpm.thermal.irq.funcs = &si_dpm_irq_funcs; | 8065 | adev->pm.dpm.thermal.irq.funcs = &si_dpm_irq_funcs; |
8011 | } | 8066 | } |
8012 | 8067 | ||
8068 | const struct amdgpu_ip_block_version si_dpm_ip_block = | ||
8069 | { | ||
8070 | .type = AMD_IP_BLOCK_TYPE_SMC, | ||
8071 | .major = 6, | ||
8072 | .minor = 0, | ||
8073 | .rev = 0, | ||
8074 | .funcs = &si_dpm_ip_funcs, | ||
8075 | }; | ||
diff --git a/drivers/gpu/drm/amd/amdgpu/si_ih.c b/drivers/gpu/drm/amd/amdgpu/si_ih.c index 8fae3d4a2360..db0f36846661 100644 --- a/drivers/gpu/drm/amd/amdgpu/si_ih.c +++ b/drivers/gpu/drm/amd/amdgpu/si_ih.c | |||
@@ -268,7 +268,7 @@ static int si_ih_set_powergating_state(void *handle, | |||
268 | return 0; | 268 | return 0; |
269 | } | 269 | } |
270 | 270 | ||
271 | const struct amd_ip_funcs si_ih_ip_funcs = { | 271 | static const struct amd_ip_funcs si_ih_ip_funcs = { |
272 | .name = "si_ih", | 272 | .name = "si_ih", |
273 | .early_init = si_ih_early_init, | 273 | .early_init = si_ih_early_init, |
274 | .late_init = NULL, | 274 | .late_init = NULL, |
@@ -297,3 +297,11 @@ static void si_ih_set_interrupt_funcs(struct amdgpu_device *adev) | |||
297 | adev->irq.ih_funcs = &si_ih_funcs; | 297 | adev->irq.ih_funcs = &si_ih_funcs; |
298 | } | 298 | } |
299 | 299 | ||
300 | const struct amdgpu_ip_block_version si_ih_ip_block = | ||
301 | { | ||
302 | .type = AMD_IP_BLOCK_TYPE_IH, | ||
303 | .major = 1, | ||
304 | .minor = 0, | ||
305 | .rev = 0, | ||
306 | .funcs = &si_ih_ip_funcs, | ||
307 | }; | ||
diff --git a/drivers/gpu/drm/amd/amdgpu/si_ih.h b/drivers/gpu/drm/amd/amdgpu/si_ih.h index f3e3a954369c..42e64a53e24f 100644 --- a/drivers/gpu/drm/amd/amdgpu/si_ih.h +++ b/drivers/gpu/drm/amd/amdgpu/si_ih.h | |||
@@ -24,6 +24,6 @@ | |||
24 | #ifndef __SI_IH_H__ | 24 | #ifndef __SI_IH_H__ |
25 | #define __SI_IH_H__ | 25 | #define __SI_IH_H__ |
26 | 26 | ||
27 | extern const struct amd_ip_funcs si_ih_ip_funcs; | 27 | extern const struct amdgpu_ip_block_version si_ih_ip_block; |
28 | 28 | ||
29 | #endif | 29 | #endif |
diff --git a/drivers/gpu/drm/amd/amdgpu/tonga_ih.c b/drivers/gpu/drm/amd/amdgpu/tonga_ih.c index b4ea229bb449..52b71ee58793 100644 --- a/drivers/gpu/drm/amd/amdgpu/tonga_ih.c +++ b/drivers/gpu/drm/amd/amdgpu/tonga_ih.c | |||
@@ -455,7 +455,7 @@ static int tonga_ih_set_powergating_state(void *handle, | |||
455 | return 0; | 455 | return 0; |
456 | } | 456 | } |
457 | 457 | ||
458 | const struct amd_ip_funcs tonga_ih_ip_funcs = { | 458 | static const struct amd_ip_funcs tonga_ih_ip_funcs = { |
459 | .name = "tonga_ih", | 459 | .name = "tonga_ih", |
460 | .early_init = tonga_ih_early_init, | 460 | .early_init = tonga_ih_early_init, |
461 | .late_init = NULL, | 461 | .late_init = NULL, |
@@ -487,3 +487,11 @@ static void tonga_ih_set_interrupt_funcs(struct amdgpu_device *adev) | |||
487 | adev->irq.ih_funcs = &tonga_ih_funcs; | 487 | adev->irq.ih_funcs = &tonga_ih_funcs; |
488 | } | 488 | } |
489 | 489 | ||
490 | const struct amdgpu_ip_block_version tonga_ih_ip_block = | ||
491 | { | ||
492 | .type = AMD_IP_BLOCK_TYPE_IH, | ||
493 | .major = 3, | ||
494 | .minor = 0, | ||
495 | .rev = 0, | ||
496 | .funcs = &tonga_ih_ip_funcs, | ||
497 | }; | ||
diff --git a/drivers/gpu/drm/amd/amdgpu/tonga_ih.h b/drivers/gpu/drm/amd/amdgpu/tonga_ih.h index 7392d70fa4a7..499027eee5c2 100644 --- a/drivers/gpu/drm/amd/amdgpu/tonga_ih.h +++ b/drivers/gpu/drm/amd/amdgpu/tonga_ih.h | |||
@@ -24,6 +24,6 @@ | |||
24 | #ifndef __TONGA_IH_H__ | 24 | #ifndef __TONGA_IH_H__ |
25 | #define __TONGA_IH_H__ | 25 | #define __TONGA_IH_H__ |
26 | 26 | ||
27 | extern const struct amd_ip_funcs tonga_ih_ip_funcs; | 27 | extern const struct amdgpu_ip_block_version tonga_ih_ip_block; |
28 | 28 | ||
29 | #endif /* __CZ_IH_H__ */ | 29 | #endif /* __TONGA_IH_H__ */ |
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c index f6c941550b8f..8f9c7d55ddda 100644 --- a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c +++ b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c | |||
@@ -36,6 +36,9 @@ | |||
36 | 36 | ||
37 | #include "bif/bif_4_1_d.h" | 37 | #include "bif/bif_4_1_d.h" |
38 | 38 | ||
39 | #include "smu/smu_7_0_1_d.h" | ||
40 | #include "smu/smu_7_0_1_sh_mask.h" | ||
41 | |||
39 | static void uvd_v4_2_mc_resume(struct amdgpu_device *adev); | 42 | static void uvd_v4_2_mc_resume(struct amdgpu_device *adev); |
40 | static void uvd_v4_2_init_cg(struct amdgpu_device *adev); | 43 | static void uvd_v4_2_init_cg(struct amdgpu_device *adev); |
41 | static void uvd_v4_2_set_ring_funcs(struct amdgpu_device *adev); | 44 | static void uvd_v4_2_set_ring_funcs(struct amdgpu_device *adev); |
@@ -116,8 +119,7 @@ static int uvd_v4_2_sw_init(void *handle) | |||
116 | 119 | ||
117 | ring = &adev->uvd.ring; | 120 | ring = &adev->uvd.ring; |
118 | sprintf(ring->name, "uvd"); | 121 | sprintf(ring->name, "uvd"); |
119 | r = amdgpu_ring_init(adev, ring, 512, PACKET0(mmUVD_NO_OP, 0), 0xf, | 122 | r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.irq, 0); |
120 | &adev->uvd.irq, 0, AMDGPU_RING_TYPE_UVD); | ||
121 | 123 | ||
122 | return r; | 124 | return r; |
123 | } | 125 | } |
@@ -526,20 +528,6 @@ static void uvd_v4_2_ring_emit_ib(struct amdgpu_ring *ring, | |||
526 | amdgpu_ring_write(ring, ib->length_dw); | 528 | amdgpu_ring_write(ring, ib->length_dw); |
527 | } | 529 | } |
528 | 530 | ||
529 | static unsigned uvd_v4_2_ring_get_emit_ib_size(struct amdgpu_ring *ring) | ||
530 | { | ||
531 | return | ||
532 | 4; /* uvd_v4_2_ring_emit_ib */ | ||
533 | } | ||
534 | |||
535 | static unsigned uvd_v4_2_ring_get_dma_frame_size(struct amdgpu_ring *ring) | ||
536 | { | ||
537 | return | ||
538 | 2 + /* uvd_v4_2_ring_emit_hdp_flush */ | ||
539 | 2 + /* uvd_v4_2_ring_emit_hdp_invalidate */ | ||
540 | 14; /* uvd_v4_2_ring_emit_fence x1 no user fence */ | ||
541 | } | ||
542 | |||
543 | /** | 531 | /** |
544 | * uvd_v4_2_mc_resume - memory controller programming | 532 | * uvd_v4_2_mc_resume - memory controller programming |
545 | * | 533 | * |
@@ -698,18 +686,34 @@ static int uvd_v4_2_process_interrupt(struct amdgpu_device *adev, | |||
698 | return 0; | 686 | return 0; |
699 | } | 687 | } |
700 | 688 | ||
689 | static void uvd_v5_0_set_bypass_mode(struct amdgpu_device *adev, bool enable) | ||
690 | { | ||
691 | u32 tmp = RREG32_SMC(ixGCK_DFS_BYPASS_CNTL); | ||
692 | |||
693 | if (enable) | ||
694 | tmp |= (GCK_DFS_BYPASS_CNTL__BYPASSDCLK_MASK | | ||
695 | GCK_DFS_BYPASS_CNTL__BYPASSVCLK_MASK); | ||
696 | else | ||
697 | tmp &= ~(GCK_DFS_BYPASS_CNTL__BYPASSDCLK_MASK | | ||
698 | GCK_DFS_BYPASS_CNTL__BYPASSVCLK_MASK); | ||
699 | |||
700 | WREG32_SMC(ixGCK_DFS_BYPASS_CNTL, tmp); | ||
701 | } | ||
702 | |||
701 | static int uvd_v4_2_set_clockgating_state(void *handle, | 703 | static int uvd_v4_2_set_clockgating_state(void *handle, |
702 | enum amd_clockgating_state state) | 704 | enum amd_clockgating_state state) |
703 | { | 705 | { |
704 | bool gate = false; | 706 | bool gate = false; |
705 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; | 707 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; |
706 | 708 | ||
707 | if (!(adev->cg_flags & AMD_CG_SUPPORT_UVD_MGCG)) | ||
708 | return 0; | ||
709 | |||
710 | if (state == AMD_CG_STATE_GATE) | 709 | if (state == AMD_CG_STATE_GATE) |
711 | gate = true; | 710 | gate = true; |
712 | 711 | ||
712 | uvd_v5_0_set_bypass_mode(adev, gate); | ||
713 | |||
714 | if (!(adev->cg_flags & AMD_CG_SUPPORT_UVD_MGCG)) | ||
715 | return 0; | ||
716 | |||
713 | uvd_v4_2_enable_mgcg(adev, gate); | 717 | uvd_v4_2_enable_mgcg(adev, gate); |
714 | 718 | ||
715 | return 0; | 719 | return 0; |
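
Note the ordering change in this hunk: the DCLK/VCLK bypass bits are now flipped on every clockgating transition, and only afterwards does the function bail out when MGCG is not advertised in cg_flags. The read-modify-write itself is the usual masked-field idiom, factored here into a helper; the register image and mask values below are stand-ins, not the driver's:

    #include <stdbool.h>
    #include <stdint.h>

    /* Toggle a mask in a register image; set == true sets the bits. */
    static inline uint32_t rmw_mask(uint32_t old, uint32_t mask, bool set)
    {
        return set ? (old | mask) : (old & ~mask);
    }

    #define BYPASS_DCLK 0x1u
    #define BYPASS_VCLK 0x2u

    static void set_bypass(uint32_t *reg_image, bool enable)
    {
        *reg_image = rmw_mask(*reg_image,
                              BYPASS_DCLK | BYPASS_VCLK, enable);
    }
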
@@ -738,7 +742,7 @@ static int uvd_v4_2_set_powergating_state(void *handle, | |||
738 | } | 742 | } |
739 | } | 743 | } |
740 | 744 | ||
741 | const struct amd_ip_funcs uvd_v4_2_ip_funcs = { | 745 | static const struct amd_ip_funcs uvd_v4_2_ip_funcs = { |
742 | .name = "uvd_v4_2", | 746 | .name = "uvd_v4_2", |
743 | .early_init = uvd_v4_2_early_init, | 747 | .early_init = uvd_v4_2_early_init, |
744 | .late_init = NULL, | 748 | .late_init = NULL, |
@@ -756,10 +760,18 @@ const struct amd_ip_funcs uvd_v4_2_ip_funcs = { | |||
756 | }; | 760 | }; |
757 | 761 | ||
758 | static const struct amdgpu_ring_funcs uvd_v4_2_ring_funcs = { | 762 | static const struct amdgpu_ring_funcs uvd_v4_2_ring_funcs = { |
763 | .type = AMDGPU_RING_TYPE_UVD, | ||
764 | .align_mask = 0xf, | ||
765 | .nop = PACKET0(mmUVD_NO_OP, 0), | ||
759 | .get_rptr = uvd_v4_2_ring_get_rptr, | 766 | .get_rptr = uvd_v4_2_ring_get_rptr, |
760 | .get_wptr = uvd_v4_2_ring_get_wptr, | 767 | .get_wptr = uvd_v4_2_ring_get_wptr, |
761 | .set_wptr = uvd_v4_2_ring_set_wptr, | 768 | .set_wptr = uvd_v4_2_ring_set_wptr, |
762 | .parse_cs = amdgpu_uvd_ring_parse_cs, | 769 | .parse_cs = amdgpu_uvd_ring_parse_cs, |
770 | .emit_frame_size = | ||
771 | 2 + /* uvd_v4_2_ring_emit_hdp_flush */ | ||
772 | 2 + /* uvd_v4_2_ring_emit_hdp_invalidate */ | ||
773 | 14, /* uvd_v4_2_ring_emit_fence x1 no user fence */ | ||
774 | .emit_ib_size = 4, /* uvd_v4_2_ring_emit_ib */ | ||
763 | .emit_ib = uvd_v4_2_ring_emit_ib, | 775 | .emit_ib = uvd_v4_2_ring_emit_ib, |
764 | .emit_fence = uvd_v4_2_ring_emit_fence, | 776 | .emit_fence = uvd_v4_2_ring_emit_fence, |
765 | .emit_hdp_flush = uvd_v4_2_ring_emit_hdp_flush, | 777 | .emit_hdp_flush = uvd_v4_2_ring_emit_hdp_flush, |
@@ -770,8 +782,6 @@ static const struct amdgpu_ring_funcs uvd_v4_2_ring_funcs = { | |||
770 | .pad_ib = amdgpu_ring_generic_pad_ib, | 782 | .pad_ib = amdgpu_ring_generic_pad_ib, |
771 | .begin_use = amdgpu_uvd_ring_begin_use, | 783 | .begin_use = amdgpu_uvd_ring_begin_use, |
772 | .end_use = amdgpu_uvd_ring_end_use, | 784 | .end_use = amdgpu_uvd_ring_end_use, |
773 | .get_emit_ib_size = uvd_v4_2_ring_get_emit_ib_size, | ||
774 | .get_dma_frame_size = uvd_v4_2_ring_get_dma_frame_size, | ||
775 | }; | 785 | }; |
776 | 786 | ||
777 | static void uvd_v4_2_set_ring_funcs(struct amdgpu_device *adev) | 787 | static void uvd_v4_2_set_ring_funcs(struct amdgpu_device *adev) |
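
Since type, align_mask and the nop packet now live in the const ring funcs table, amdgpu_ring_init() drops three parameters; each ring's fixed identity travels with its function table instead of with every call site. A sketch of what the init side can then look like, with simplified, assumed structures:

    struct ring_funcs {
        int type;
        unsigned align_mask;
        unsigned nop;           /* padding packet */
    };

    struct ring {
        const struct ring_funcs *funcs;
        int type;
        unsigned align_mask, nop;
    };

    /* Fewer parameters: fixed properties come from the table. */
    static void ring_init(struct ring *ring, const struct ring_funcs *f)
    {
        ring->funcs      = f;
        ring->type       = f->type;
        ring->align_mask = f->align_mask;
        ring->nop        = f->nop;
    }
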
@@ -789,3 +799,12 @@ static void uvd_v4_2_set_irq_funcs(struct amdgpu_device *adev) | |||
789 | adev->uvd.irq.num_types = 1; | 799 | adev->uvd.irq.num_types = 1; |
790 | adev->uvd.irq.funcs = &uvd_v4_2_irq_funcs; | 800 | adev->uvd.irq.funcs = &uvd_v4_2_irq_funcs; |
791 | } | 801 | } |
802 | |||
803 | const struct amdgpu_ip_block_version uvd_v4_2_ip_block = | ||
804 | { | ||
805 | .type = AMD_IP_BLOCK_TYPE_UVD, | ||
806 | .major = 4, | ||
807 | .minor = 2, | ||
808 | .rev = 0, | ||
809 | .funcs = &uvd_v4_2_ip_funcs, | ||
810 | }; | ||
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.h b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.h index 0a615dd50840..8a0444bb8b95 100644 --- a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.h +++ b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.h | |||
@@ -24,6 +24,6 @@ | |||
24 | #ifndef __UVD_V4_2_H__ | 24 | #ifndef __UVD_V4_2_H__ |
25 | #define __UVD_V4_2_H__ | 25 | #define __UVD_V4_2_H__ |
26 | 26 | ||
27 | extern const struct amd_ip_funcs uvd_v4_2_ip_funcs; | 27 | extern const struct amdgpu_ip_block_version uvd_v4_2_ip_block; |
28 | 28 | ||
29 | #endif | 29 | #endif |
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c index 400c16fe579e..95303e2d5f92 100644 --- a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c +++ b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c | |||
@@ -33,6 +33,8 @@ | |||
33 | #include "oss/oss_2_0_sh_mask.h" | 33 | #include "oss/oss_2_0_sh_mask.h" |
34 | #include "bif/bif_5_0_d.h" | 34 | #include "bif/bif_5_0_d.h" |
35 | #include "vi.h" | 35 | #include "vi.h" |
36 | #include "smu/smu_7_1_2_d.h" | ||
37 | #include "smu/smu_7_1_2_sh_mask.h" | ||
36 | 38 | ||
37 | static void uvd_v5_0_set_ring_funcs(struct amdgpu_device *adev); | 39 | static void uvd_v5_0_set_ring_funcs(struct amdgpu_device *adev); |
38 | static void uvd_v5_0_set_irq_funcs(struct amdgpu_device *adev); | 40 | static void uvd_v5_0_set_irq_funcs(struct amdgpu_device *adev); |
@@ -112,8 +114,7 @@ static int uvd_v5_0_sw_init(void *handle) | |||
112 | 114 | ||
113 | ring = &adev->uvd.ring; | 115 | ring = &adev->uvd.ring; |
114 | sprintf(ring->name, "uvd"); | 116 | sprintf(ring->name, "uvd"); |
115 | r = amdgpu_ring_init(adev, ring, 512, PACKET0(mmUVD_NO_OP, 0), 0xf, | 117 | r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.irq, 0); |
116 | &adev->uvd.irq, 0, AMDGPU_RING_TYPE_UVD); | ||
117 | 118 | ||
118 | return r; | 119 | return r; |
119 | } | 120 | } |
@@ -577,20 +578,6 @@ static void uvd_v5_0_ring_emit_ib(struct amdgpu_ring *ring, | |||
577 | amdgpu_ring_write(ring, ib->length_dw); | 578 | amdgpu_ring_write(ring, ib->length_dw); |
578 | } | 579 | } |
579 | 580 | ||
580 | static unsigned uvd_v5_0_ring_get_emit_ib_size(struct amdgpu_ring *ring) | ||
581 | { | ||
582 | return | ||
583 | 6; /* uvd_v5_0_ring_emit_ib */ | ||
584 | } | ||
585 | |||
586 | static unsigned uvd_v5_0_ring_get_dma_frame_size(struct amdgpu_ring *ring) | ||
587 | { | ||
588 | return | ||
589 | 2 + /* uvd_v5_0_ring_emit_hdp_flush */ | ||
590 | 2 + /* uvd_v5_0_ring_emit_hdp_invalidate */ | ||
591 | 14; /* uvd_v5_0_ring_emit_fence x1 no user fence */ | ||
592 | } | ||
593 | |||
594 | static bool uvd_v5_0_is_idle(void *handle) | 581 | static bool uvd_v5_0_is_idle(void *handle) |
595 | { | 582 | { |
596 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; | 583 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; |
@@ -737,6 +724,20 @@ static void uvd_v5_0_set_hw_clock_gating(struct amdgpu_device *adev) | |||
737 | } | 724 | } |
738 | #endif | 725 | #endif |
739 | 726 | ||
727 | static void uvd_v5_0_set_bypass_mode(struct amdgpu_device *adev, bool enable) | ||
728 | { | ||
729 | u32 tmp = RREG32_SMC(ixGCK_DFS_BYPASS_CNTL); | ||
730 | |||
731 | if (enable) | ||
732 | tmp |= (GCK_DFS_BYPASS_CNTL__BYPASSDCLK_MASK | | ||
733 | GCK_DFS_BYPASS_CNTL__BYPASSVCLK_MASK); | ||
734 | else | ||
735 | tmp &= ~(GCK_DFS_BYPASS_CNTL__BYPASSDCLK_MASK | | ||
736 | GCK_DFS_BYPASS_CNTL__BYPASSVCLK_MASK); | ||
737 | |||
738 | WREG32_SMC(ixGCK_DFS_BYPASS_CNTL, tmp); | ||
739 | } | ||
740 | |||
740 | static int uvd_v5_0_set_clockgating_state(void *handle, | 741 | static int uvd_v5_0_set_clockgating_state(void *handle, |
741 | enum amd_clockgating_state state) | 742 | enum amd_clockgating_state state) |
742 | { | 743 | { |
@@ -744,6 +745,8 @@ static int uvd_v5_0_set_clockgating_state(void *handle, | |||
744 | bool enable = (state == AMD_CG_STATE_GATE) ? true : false; | 745 | bool enable = (state == AMD_CG_STATE_GATE) ? true : false; |
745 | static int curstate = -1; | 746 | static int curstate = -1; |
746 | 747 | ||
748 | uvd_v5_0_set_bypass_mode(adev, enable); | ||
749 | |||
747 | if (!(adev->cg_flags & AMD_CG_SUPPORT_UVD_MGCG)) | 750 | if (!(adev->cg_flags & AMD_CG_SUPPORT_UVD_MGCG)) |
748 | return 0; | 751 | return 0; |
749 | 752 | ||
@@ -789,7 +792,7 @@ static int uvd_v5_0_set_powergating_state(void *handle, | |||
789 | } | 792 | } |
790 | } | 793 | } |
791 | 794 | ||
792 | const struct amd_ip_funcs uvd_v5_0_ip_funcs = { | 795 | static const struct amd_ip_funcs uvd_v5_0_ip_funcs = { |
793 | .name = "uvd_v5_0", | 796 | .name = "uvd_v5_0", |
794 | .early_init = uvd_v5_0_early_init, | 797 | .early_init = uvd_v5_0_early_init, |
795 | .late_init = NULL, | 798 | .late_init = NULL, |
@@ -807,10 +810,18 @@ const struct amd_ip_funcs uvd_v5_0_ip_funcs = { | |||
807 | }; | 810 | }; |
808 | 811 | ||
809 | static const struct amdgpu_ring_funcs uvd_v5_0_ring_funcs = { | 812 | static const struct amdgpu_ring_funcs uvd_v5_0_ring_funcs = { |
813 | .type = AMDGPU_RING_TYPE_UVD, | ||
814 | .align_mask = 0xf, | ||
815 | .nop = PACKET0(mmUVD_NO_OP, 0), | ||
810 | .get_rptr = uvd_v5_0_ring_get_rptr, | 816 | .get_rptr = uvd_v5_0_ring_get_rptr, |
811 | .get_wptr = uvd_v5_0_ring_get_wptr, | 817 | .get_wptr = uvd_v5_0_ring_get_wptr, |
812 | .set_wptr = uvd_v5_0_ring_set_wptr, | 818 | .set_wptr = uvd_v5_0_ring_set_wptr, |
813 | .parse_cs = amdgpu_uvd_ring_parse_cs, | 819 | .parse_cs = amdgpu_uvd_ring_parse_cs, |
820 | .emit_frame_size = | ||
821 | 2 + /* uvd_v5_0_ring_emit_hdp_flush */ | ||
822 | 2 + /* uvd_v5_0_ring_emit_hdp_invalidate */ | ||
823 | 14, /* uvd_v5_0_ring_emit_fence x1 no user fence */ | ||
824 | .emit_ib_size = 6, /* uvd_v5_0_ring_emit_ib */ | ||
814 | .emit_ib = uvd_v5_0_ring_emit_ib, | 825 | .emit_ib = uvd_v5_0_ring_emit_ib, |
815 | .emit_fence = uvd_v5_0_ring_emit_fence, | 826 | .emit_fence = uvd_v5_0_ring_emit_fence, |
816 | .emit_hdp_flush = uvd_v5_0_ring_emit_hdp_flush, | 827 | .emit_hdp_flush = uvd_v5_0_ring_emit_hdp_flush, |
@@ -821,8 +832,6 @@ static const struct amdgpu_ring_funcs uvd_v5_0_ring_funcs = { | |||
821 | .pad_ib = amdgpu_ring_generic_pad_ib, | 832 | .pad_ib = amdgpu_ring_generic_pad_ib, |
822 | .begin_use = amdgpu_uvd_ring_begin_use, | 833 | .begin_use = amdgpu_uvd_ring_begin_use, |
823 | .end_use = amdgpu_uvd_ring_end_use, | 834 | .end_use = amdgpu_uvd_ring_end_use, |
824 | .get_emit_ib_size = uvd_v5_0_ring_get_emit_ib_size, | ||
825 | .get_dma_frame_size = uvd_v5_0_ring_get_dma_frame_size, | ||
826 | }; | 835 | }; |
827 | 836 | ||
828 | static void uvd_v5_0_set_ring_funcs(struct amdgpu_device *adev) | 837 | static void uvd_v5_0_set_ring_funcs(struct amdgpu_device *adev) |
@@ -840,3 +849,12 @@ static void uvd_v5_0_set_irq_funcs(struct amdgpu_device *adev) | |||
840 | adev->uvd.irq.num_types = 1; | 849 | adev->uvd.irq.num_types = 1; |
841 | adev->uvd.irq.funcs = &uvd_v5_0_irq_funcs; | 850 | adev->uvd.irq.funcs = &uvd_v5_0_irq_funcs; |
842 | } | 851 | } |
852 | |||
853 | const struct amdgpu_ip_block_version uvd_v5_0_ip_block = | ||
854 | { | ||
855 | .type = AMD_IP_BLOCK_TYPE_UVD, | ||
856 | .major = 5, | ||
857 | .minor = 0, | ||
858 | .rev = 0, | ||
859 | .funcs = &uvd_v5_0_ip_funcs, | ||
860 | }; | ||
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.h b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.h index e3b3c49fa5de..2eaaea793ac5 100644 --- a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.h +++ b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.h | |||
@@ -24,6 +24,6 @@ | |||
24 | #ifndef __UVD_V5_0_H__ | 24 | #ifndef __UVD_V5_0_H__ |
25 | #define __UVD_V5_0_H__ | 25 | #define __UVD_V5_0_H__ |
26 | 26 | ||
27 | extern const struct amd_ip_funcs uvd_v5_0_ip_funcs; | 27 | extern const struct amdgpu_ip_block_version uvd_v5_0_ip_block; |
28 | 28 | ||
29 | #endif | 29 | #endif |
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c index ab3df6d75656..a339b5ccb296 100644 --- a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c +++ b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c | |||
@@ -116,8 +116,7 @@ static int uvd_v6_0_sw_init(void *handle) | |||
116 | 116 | ||
117 | ring = &adev->uvd.ring; | 117 | ring = &adev->uvd.ring; |
118 | sprintf(ring->name, "uvd"); | 118 | sprintf(ring->name, "uvd"); |
119 | r = amdgpu_ring_init(adev, ring, 512, PACKET0(mmUVD_NO_OP, 0), 0xf, | 119 | r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.irq, 0); |
120 | &adev->uvd.irq, 0, AMDGPU_RING_TYPE_UVD); | ||
121 | 120 | ||
122 | return r; | 121 | return r; |
123 | } | 122 | } |
@@ -725,31 +724,6 @@ static void uvd_v6_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring) | |||
725 | amdgpu_ring_write(ring, 0xE); | 724 | amdgpu_ring_write(ring, 0xE); |
726 | } | 725 | } |
727 | 726 | ||
728 | static unsigned uvd_v6_0_ring_get_emit_ib_size(struct amdgpu_ring *ring) | ||
729 | { | ||
730 | return | ||
731 | 8; /* uvd_v6_0_ring_emit_ib */ | ||
732 | } | ||
733 | |||
734 | static unsigned uvd_v6_0_ring_get_dma_frame_size(struct amdgpu_ring *ring) | ||
735 | { | ||
736 | return | ||
737 | 2 + /* uvd_v6_0_ring_emit_hdp_flush */ | ||
738 | 2 + /* uvd_v6_0_ring_emit_hdp_invalidate */ | ||
739 | 10 + /* uvd_v6_0_ring_emit_pipeline_sync */ | ||
740 | 14; /* uvd_v6_0_ring_emit_fence x1 no user fence */ | ||
741 | } | ||
742 | |||
743 | static unsigned uvd_v6_0_ring_get_dma_frame_size_vm(struct amdgpu_ring *ring) | ||
744 | { | ||
745 | return | ||
746 | 2 + /* uvd_v6_0_ring_emit_hdp_flush */ | ||
747 | 2 + /* uvd_v6_0_ring_emit_hdp_invalidate */ | ||
748 | 10 + /* uvd_v6_0_ring_emit_pipeline_sync */ | ||
749 | 20 + /* uvd_v6_0_ring_emit_vm_flush */ | ||
750 | 14 + 14; /* uvd_v6_0_ring_emit_fence x2 vm fence */ | ||
751 | } | ||
752 | |||
753 | static bool uvd_v6_0_is_idle(void *handle) | 727 | static bool uvd_v6_0_is_idle(void *handle) |
754 | { | 728 | { |
755 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; | 729 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; |
@@ -961,7 +935,7 @@ static void uvd_v6_0_set_hw_clock_gating(struct amdgpu_device *adev) | |||
961 | } | 935 | } |
962 | #endif | 936 | #endif |
963 | 937 | ||
964 | static void uvd_v6_set_bypass_mode(struct amdgpu_device *adev, bool enable) | 938 | static void uvd_v6_0_set_bypass_mode(struct amdgpu_device *adev, bool enable) |
965 | { | 939 | { |
966 | u32 tmp = RREG32_SMC(ixGCK_DFS_BYPASS_CNTL); | 940 | u32 tmp = RREG32_SMC(ixGCK_DFS_BYPASS_CNTL); |
967 | 941 | ||
@@ -979,15 +953,14 @@ static int uvd_v6_0_set_clockgating_state(void *handle, | |||
979 | enum amd_clockgating_state state) | 953 | enum amd_clockgating_state state) |
980 | { | 954 | { |
981 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; | 955 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; |
956 | bool enable = (state == AMD_CG_STATE_GATE) ? true : false; | ||
982 | 957 | ||
983 | if (adev->asic_type == CHIP_FIJI || | 958 | uvd_v6_0_set_bypass_mode(adev, enable); |
984 | adev->asic_type == CHIP_POLARIS10) | ||
985 | uvd_v6_set_bypass_mode(adev, state == AMD_CG_STATE_GATE ? true : false); | ||
986 | 959 | ||
987 | if (!(adev->cg_flags & AMD_CG_SUPPORT_UVD_MGCG)) | 960 | if (!(adev->cg_flags & AMD_CG_SUPPORT_UVD_MGCG)) |
988 | return 0; | 961 | return 0; |
989 | 962 | ||
990 | if (state == AMD_CG_STATE_GATE) { | 963 | if (enable) { |
991 | /* disable HW gating and enable Sw gating */ | 964 | /* disable HW gating and enable Sw gating */ |
992 | uvd_v6_0_set_sw_clock_gating(adev); | 965 | uvd_v6_0_set_sw_clock_gating(adev); |
993 | } else { | 966 | } else { |
@@ -1027,7 +1000,7 @@ static int uvd_v6_0_set_powergating_state(void *handle, | |||
1027 | } | 1000 | } |
1028 | } | 1001 | } |
1029 | 1002 | ||
1030 | const struct amd_ip_funcs uvd_v6_0_ip_funcs = { | 1003 | static const struct amd_ip_funcs uvd_v6_0_ip_funcs = { |
1031 | .name = "uvd_v6_0", | 1004 | .name = "uvd_v6_0", |
1032 | .early_init = uvd_v6_0_early_init, | 1005 | .early_init = uvd_v6_0_early_init, |
1033 | .late_init = NULL, | 1006 | .late_init = NULL, |
@@ -1048,10 +1021,19 @@ const struct amd_ip_funcs uvd_v6_0_ip_funcs = { | |||
1048 | }; | 1021 | }; |
1049 | 1022 | ||
1050 | static const struct amdgpu_ring_funcs uvd_v6_0_ring_phys_funcs = { | 1023 | static const struct amdgpu_ring_funcs uvd_v6_0_ring_phys_funcs = { |
1024 | .type = AMDGPU_RING_TYPE_UVD, | ||
1025 | .align_mask = 0xf, | ||
1026 | .nop = PACKET0(mmUVD_NO_OP, 0), | ||
1051 | .get_rptr = uvd_v6_0_ring_get_rptr, | 1027 | .get_rptr = uvd_v6_0_ring_get_rptr, |
1052 | .get_wptr = uvd_v6_0_ring_get_wptr, | 1028 | .get_wptr = uvd_v6_0_ring_get_wptr, |
1053 | .set_wptr = uvd_v6_0_ring_set_wptr, | 1029 | .set_wptr = uvd_v6_0_ring_set_wptr, |
1054 | .parse_cs = amdgpu_uvd_ring_parse_cs, | 1030 | .parse_cs = amdgpu_uvd_ring_parse_cs, |
1031 | .emit_frame_size = | ||
1032 | 2 + /* uvd_v6_0_ring_emit_hdp_flush */ | ||
1033 | 2 + /* uvd_v6_0_ring_emit_hdp_invalidate */ | ||
1034 | 10 + /* uvd_v6_0_ring_emit_pipeline_sync */ | ||
1035 | 14, /* uvd_v6_0_ring_emit_fence x1 no user fence */ | ||
1036 | .emit_ib_size = 8, /* uvd_v6_0_ring_emit_ib */ | ||
1055 | .emit_ib = uvd_v6_0_ring_emit_ib, | 1037 | .emit_ib = uvd_v6_0_ring_emit_ib, |
1056 | .emit_fence = uvd_v6_0_ring_emit_fence, | 1038 | .emit_fence = uvd_v6_0_ring_emit_fence, |
1057 | .emit_hdp_flush = uvd_v6_0_ring_emit_hdp_flush, | 1039 | .emit_hdp_flush = uvd_v6_0_ring_emit_hdp_flush, |
@@ -1062,15 +1044,22 @@ static const struct amdgpu_ring_funcs uvd_v6_0_ring_phys_funcs = { | |||
1062 | .pad_ib = amdgpu_ring_generic_pad_ib, | 1044 | .pad_ib = amdgpu_ring_generic_pad_ib, |
1063 | .begin_use = amdgpu_uvd_ring_begin_use, | 1045 | .begin_use = amdgpu_uvd_ring_begin_use, |
1064 | .end_use = amdgpu_uvd_ring_end_use, | 1046 | .end_use = amdgpu_uvd_ring_end_use, |
1065 | .get_emit_ib_size = uvd_v6_0_ring_get_emit_ib_size, | ||
1066 | .get_dma_frame_size = uvd_v6_0_ring_get_dma_frame_size, | ||
1067 | }; | 1047 | }; |
1068 | 1048 | ||
1069 | static const struct amdgpu_ring_funcs uvd_v6_0_ring_vm_funcs = { | 1049 | static const struct amdgpu_ring_funcs uvd_v6_0_ring_vm_funcs = { |
1050 | .type = AMDGPU_RING_TYPE_UVD, | ||
1051 | .align_mask = 0xf, | ||
1052 | .nop = PACKET0(mmUVD_NO_OP, 0), | ||
1070 | .get_rptr = uvd_v6_0_ring_get_rptr, | 1053 | .get_rptr = uvd_v6_0_ring_get_rptr, |
1071 | .get_wptr = uvd_v6_0_ring_get_wptr, | 1054 | .get_wptr = uvd_v6_0_ring_get_wptr, |
1072 | .set_wptr = uvd_v6_0_ring_set_wptr, | 1055 | .set_wptr = uvd_v6_0_ring_set_wptr, |
1073 | .parse_cs = NULL, | 1056 | .emit_frame_size = |
1057 | 2 + /* uvd_v6_0_ring_emit_hdp_flush */ | ||
1058 | 2 + /* uvd_v6_0_ring_emit_hdp_invalidate */ | ||
1059 | 10 + /* uvd_v6_0_ring_emit_pipeline_sync */ | ||
1060 | 20 + /* uvd_v6_0_ring_emit_vm_flush */ | ||
1061 | 14 + 14, /* uvd_v6_0_ring_emit_fence x2 vm fence */ | ||
1062 | .emit_ib_size = 8, /* uvd_v6_0_ring_emit_ib */ | ||
1074 | .emit_ib = uvd_v6_0_ring_emit_ib, | 1063 | .emit_ib = uvd_v6_0_ring_emit_ib, |
1075 | .emit_fence = uvd_v6_0_ring_emit_fence, | 1064 | .emit_fence = uvd_v6_0_ring_emit_fence, |
1076 | .emit_vm_flush = uvd_v6_0_ring_emit_vm_flush, | 1065 | .emit_vm_flush = uvd_v6_0_ring_emit_vm_flush, |
@@ -1083,8 +1072,6 @@ static const struct amdgpu_ring_funcs uvd_v6_0_ring_vm_funcs = { | |||
1083 | .pad_ib = amdgpu_ring_generic_pad_ib, | 1072 | .pad_ib = amdgpu_ring_generic_pad_ib, |
1084 | .begin_use = amdgpu_uvd_ring_begin_use, | 1073 | .begin_use = amdgpu_uvd_ring_begin_use, |
1085 | .end_use = amdgpu_uvd_ring_end_use, | 1074 | .end_use = amdgpu_uvd_ring_end_use, |
1086 | .get_emit_ib_size = uvd_v6_0_ring_get_emit_ib_size, | ||
1087 | .get_dma_frame_size = uvd_v6_0_ring_get_dma_frame_size_vm, | ||
1088 | }; | 1075 | }; |
1089 | 1076 | ||
1090 | static void uvd_v6_0_set_ring_funcs(struct amdgpu_device *adev) | 1077 | static void uvd_v6_0_set_ring_funcs(struct amdgpu_device *adev) |
@@ -1108,3 +1095,30 @@ static void uvd_v6_0_set_irq_funcs(struct amdgpu_device *adev) | |||
1108 | adev->uvd.irq.num_types = 1; | 1095 | adev->uvd.irq.num_types = 1; |
1109 | adev->uvd.irq.funcs = &uvd_v6_0_irq_funcs; | 1096 | adev->uvd.irq.funcs = &uvd_v6_0_irq_funcs; |
1110 | } | 1097 | } |
1098 | |||
1099 | const struct amdgpu_ip_block_version uvd_v6_0_ip_block = | ||
1100 | { | ||
1101 | .type = AMD_IP_BLOCK_TYPE_UVD, | ||
1102 | .major = 6, | ||
1103 | .minor = 0, | ||
1104 | .rev = 0, | ||
1105 | .funcs = &uvd_v6_0_ip_funcs, | ||
1106 | }; | ||
1107 | |||
1108 | const struct amdgpu_ip_block_version uvd_v6_2_ip_block = | ||
1109 | { | ||
1110 | .type = AMD_IP_BLOCK_TYPE_UVD, | ||
1111 | .major = 6, | ||
1112 | .minor = 2, | ||
1113 | .rev = 0, | ||
1114 | .funcs = &uvd_v6_0_ip_funcs, | ||
1115 | }; | ||
1116 | |||
1117 | const struct amdgpu_ip_block_version uvd_v6_3_ip_block = | ||
1118 | { | ||
1119 | .type = AMD_IP_BLOCK_TYPE_UVD, | ||
1120 | .major = 6, | ||
1121 | .minor = 3, | ||
1122 | .rev = 0, | ||
1123 | .funcs = &uvd_v6_0_ip_funcs, | ||
1124 | }; | ||
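
One static uvd_v6_0_ip_funcs table now backs three exported descriptors (6.0, 6.2, 6.3), so per-ASIC code can register the exact silicon revision while all of them share the same callbacks; anything revision-specific can branch on major/minor later. A hypothetical consumer keying off the advertised version:

    struct ip_block_version { int major, minor, rev; };

    /* Hypothetical: enable a quirk only on 6.2 and newer parts. */
    static int needs_quirk(const struct ip_block_version *v)
    {
        return v->major > 6 || (v->major == 6 && v->minor >= 2);
    }
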
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.h b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.h index 6b92a2352986..d3d48c6428cd 100644 --- a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.h +++ b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.h | |||
@@ -24,6 +24,8 @@ | |||
24 | #ifndef __UVD_V6_0_H__ | 24 | #ifndef __UVD_V6_0_H__ |
25 | #define __UVD_V6_0_H__ | 25 | #define __UVD_V6_0_H__ |
26 | 26 | ||
27 | extern const struct amd_ip_funcs uvd_v6_0_ip_funcs; | 27 | extern const struct amdgpu_ip_block_version uvd_v6_0_ip_block; |
28 | extern const struct amdgpu_ip_block_version uvd_v6_2_ip_block; | ||
29 | extern const struct amdgpu_ip_block_version uvd_v6_3_ip_block; | ||
28 | 30 | ||
29 | #endif | 31 | #endif |
diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c index 76e64ad04a53..38ed903dd6f8 100644 --- a/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c +++ b/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c | |||
@@ -224,8 +224,8 @@ static int vce_v2_0_sw_init(void *handle) | |||
224 | for (i = 0; i < adev->vce.num_rings; i++) { | 224 | for (i = 0; i < adev->vce.num_rings; i++) { |
225 | ring = &adev->vce.ring[i]; | 225 | ring = &adev->vce.ring[i]; |
226 | sprintf(ring->name, "vce%d", i); | 226 | sprintf(ring->name, "vce%d", i); |
227 | r = amdgpu_ring_init(adev, ring, 512, VCE_CMD_NO_OP, 0xf, | 227 | r = amdgpu_ring_init(adev, ring, 512, |
228 | &adev->vce.irq, 0, AMDGPU_RING_TYPE_VCE); | 228 | &adev->vce.irq, 0); |
229 | if (r) | 229 | if (r) |
230 | return r; | 230 | return r; |
231 | } | 231 | } |
@@ -592,7 +592,7 @@ static int vce_v2_0_set_powergating_state(void *handle, | |||
592 | return vce_v2_0_start(adev); | 592 | return vce_v2_0_start(adev); |
593 | } | 593 | } |
594 | 594 | ||
595 | const struct amd_ip_funcs vce_v2_0_ip_funcs = { | 595 | static const struct amd_ip_funcs vce_v2_0_ip_funcs = { |
596 | .name = "vce_v2_0", | 596 | .name = "vce_v2_0", |
597 | .early_init = vce_v2_0_early_init, | 597 | .early_init = vce_v2_0_early_init, |
598 | .late_init = NULL, | 598 | .late_init = NULL, |
@@ -610,10 +610,15 @@ const struct amd_ip_funcs vce_v2_0_ip_funcs = { | |||
610 | }; | 610 | }; |
611 | 611 | ||
612 | static const struct amdgpu_ring_funcs vce_v2_0_ring_funcs = { | 612 | static const struct amdgpu_ring_funcs vce_v2_0_ring_funcs = { |
613 | .type = AMDGPU_RING_TYPE_VCE, | ||
614 | .align_mask = 0xf, | ||
615 | .nop = VCE_CMD_NO_OP, | ||
613 | .get_rptr = vce_v2_0_ring_get_rptr, | 616 | .get_rptr = vce_v2_0_ring_get_rptr, |
614 | .get_wptr = vce_v2_0_ring_get_wptr, | 617 | .get_wptr = vce_v2_0_ring_get_wptr, |
615 | .set_wptr = vce_v2_0_ring_set_wptr, | 618 | .set_wptr = vce_v2_0_ring_set_wptr, |
616 | .parse_cs = amdgpu_vce_ring_parse_cs, | 619 | .parse_cs = amdgpu_vce_ring_parse_cs, |
620 | .emit_frame_size = 6, /* amdgpu_vce_ring_emit_fence x1 no user fence */ | ||
621 | .emit_ib_size = 4, /* amdgpu_vce_ring_emit_ib */ | ||
617 | .emit_ib = amdgpu_vce_ring_emit_ib, | 622 | .emit_ib = amdgpu_vce_ring_emit_ib, |
618 | .emit_fence = amdgpu_vce_ring_emit_fence, | 623 | .emit_fence = amdgpu_vce_ring_emit_fence, |
619 | .test_ring = amdgpu_vce_ring_test_ring, | 624 | .test_ring = amdgpu_vce_ring_test_ring, |
@@ -622,8 +627,6 @@ static const struct amdgpu_ring_funcs vce_v2_0_ring_funcs = { | |||
622 | .pad_ib = amdgpu_ring_generic_pad_ib, | 627 | .pad_ib = amdgpu_ring_generic_pad_ib, |
623 | .begin_use = amdgpu_vce_ring_begin_use, | 628 | .begin_use = amdgpu_vce_ring_begin_use, |
624 | .end_use = amdgpu_vce_ring_end_use, | 629 | .end_use = amdgpu_vce_ring_end_use, |
625 | .get_emit_ib_size = amdgpu_vce_ring_get_emit_ib_size, | ||
626 | .get_dma_frame_size = amdgpu_vce_ring_get_dma_frame_size, | ||
627 | }; | 630 | }; |
628 | 631 | ||
629 | static void vce_v2_0_set_ring_funcs(struct amdgpu_device *adev) | 632 | static void vce_v2_0_set_ring_funcs(struct amdgpu_device *adev) |
@@ -644,3 +647,12 @@ static void vce_v2_0_set_irq_funcs(struct amdgpu_device *adev) | |||
644 | adev->vce.irq.num_types = 1; | 647 | adev->vce.irq.num_types = 1; |
645 | adev->vce.irq.funcs = &vce_v2_0_irq_funcs; | 648 | adev->vce.irq.funcs = &vce_v2_0_irq_funcs; |
646 | }; | 649 | }; |
650 | |||
651 | const struct amdgpu_ip_block_version vce_v2_0_ip_block = | ||
652 | { | ||
653 | .type = AMD_IP_BLOCK_TYPE_VCE, | ||
654 | .major = 2, | ||
655 | .minor = 0, | ||
656 | .rev = 0, | ||
657 | .funcs = &vce_v2_0_ip_funcs, | ||
658 | }; | ||
diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v2_0.h b/drivers/gpu/drm/amd/amdgpu/vce_v2_0.h index 0d2ae8a01acd..4d15167654a6 100644 --- a/drivers/gpu/drm/amd/amdgpu/vce_v2_0.h +++ b/drivers/gpu/drm/amd/amdgpu/vce_v2_0.h | |||
@@ -24,6 +24,6 @@ | |||
24 | #ifndef __VCE_V2_0_H__ | 24 | #ifndef __VCE_V2_0_H__ |
25 | #define __VCE_V2_0_H__ | 25 | #define __VCE_V2_0_H__ |
26 | 26 | ||
27 | extern const struct amd_ip_funcs vce_v2_0_ip_funcs; | 27 | extern const struct amdgpu_ip_block_version vce_v2_0_ip_block; |
28 | 28 | ||
29 | #endif | 29 | #endif |
diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c index 8533269ec160..5ed2930a8568 100644 --- a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c +++ b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c | |||
@@ -389,8 +389,7 @@ static int vce_v3_0_sw_init(void *handle) | |||
389 | for (i = 0; i < adev->vce.num_rings; i++) { | 389 | for (i = 0; i < adev->vce.num_rings; i++) { |
390 | ring = &adev->vce.ring[i]; | 390 | ring = &adev->vce.ring[i]; |
391 | sprintf(ring->name, "vce%d", i); | 391 | sprintf(ring->name, "vce%d", i); |
392 | r = amdgpu_ring_init(adev, ring, 512, VCE_CMD_NO_OP, 0xf, | 392 | r = amdgpu_ring_init(adev, ring, 512, &adev->vce.irq, 0); |
393 | &adev->vce.irq, 0, AMDGPU_RING_TYPE_VCE); | ||
394 | if (r) | 393 | if (r) |
395 | return r; | 394 | return r; |
396 | } | 395 | } |
@@ -808,28 +807,7 @@ static void vce_v3_0_emit_pipeline_sync(struct amdgpu_ring *ring) | |||
808 | amdgpu_ring_write(ring, seq); | 807 | amdgpu_ring_write(ring, seq); |
809 | } | 808 | } |
810 | 809 | ||
811 | static unsigned vce_v3_0_ring_get_emit_ib_size(struct amdgpu_ring *ring) | 810 | static const struct amd_ip_funcs vce_v3_0_ip_funcs = { |
812 | { | ||
813 | return | ||
814 | 5; /* vce_v3_0_ring_emit_ib */ | ||
815 | } | ||
816 | |||
817 | static unsigned vce_v3_0_ring_get_dma_frame_size(struct amdgpu_ring *ring) | ||
818 | { | ||
819 | return | ||
820 | 4 + /* vce_v3_0_emit_pipeline_sync */ | ||
821 | 6; /* amdgpu_vce_ring_emit_fence x1 no user fence */ | ||
822 | } | ||
823 | |||
824 | static unsigned vce_v3_0_ring_get_dma_frame_size_vm(struct amdgpu_ring *ring) | ||
825 | { | ||
826 | return | ||
827 | 6 + /* vce_v3_0_emit_vm_flush */ | ||
828 | 4 + /* vce_v3_0_emit_pipeline_sync */ | ||
829 | 6 + 6; /* amdgpu_vce_ring_emit_fence x2 vm fence */ | ||
830 | } | ||
831 | |||
832 | const struct amd_ip_funcs vce_v3_0_ip_funcs = { | ||
833 | .name = "vce_v3_0", | 811 | .name = "vce_v3_0", |
834 | .early_init = vce_v3_0_early_init, | 812 | .early_init = vce_v3_0_early_init, |
835 | .late_init = NULL, | 813 | .late_init = NULL, |
@@ -850,10 +828,17 @@ const struct amd_ip_funcs vce_v3_0_ip_funcs = { | |||
850 | }; | 828 | }; |
851 | 829 | ||
852 | static const struct amdgpu_ring_funcs vce_v3_0_ring_phys_funcs = { | 830 | static const struct amdgpu_ring_funcs vce_v3_0_ring_phys_funcs = { |
831 | .type = AMDGPU_RING_TYPE_VCE, | ||
832 | .align_mask = 0xf, | ||
833 | .nop = VCE_CMD_NO_OP, | ||
853 | .get_rptr = vce_v3_0_ring_get_rptr, | 834 | .get_rptr = vce_v3_0_ring_get_rptr, |
854 | .get_wptr = vce_v3_0_ring_get_wptr, | 835 | .get_wptr = vce_v3_0_ring_get_wptr, |
855 | .set_wptr = vce_v3_0_ring_set_wptr, | 836 | .set_wptr = vce_v3_0_ring_set_wptr, |
856 | .parse_cs = amdgpu_vce_ring_parse_cs, | 837 | .parse_cs = amdgpu_vce_ring_parse_cs, |
838 | .emit_frame_size = | ||
839 | 4 + /* vce_v3_0_emit_pipeline_sync */ | ||
840 | 6, /* amdgpu_vce_ring_emit_fence x1 no user fence */ | ||
841 | .emit_ib_size = 5, /* vce_v3_0_ring_emit_ib */ | ||
857 | .emit_ib = amdgpu_vce_ring_emit_ib, | 842 | .emit_ib = amdgpu_vce_ring_emit_ib, |
858 | .emit_fence = amdgpu_vce_ring_emit_fence, | 843 | .emit_fence = amdgpu_vce_ring_emit_fence, |
859 | .test_ring = amdgpu_vce_ring_test_ring, | 844 | .test_ring = amdgpu_vce_ring_test_ring, |
@@ -862,15 +847,21 @@ static const struct amdgpu_ring_funcs vce_v3_0_ring_phys_funcs = { | |||
862 | .pad_ib = amdgpu_ring_generic_pad_ib, | 847 | .pad_ib = amdgpu_ring_generic_pad_ib, |
863 | .begin_use = amdgpu_vce_ring_begin_use, | 848 | .begin_use = amdgpu_vce_ring_begin_use, |
864 | .end_use = amdgpu_vce_ring_end_use, | 849 | .end_use = amdgpu_vce_ring_end_use, |
865 | .get_emit_ib_size = vce_v3_0_ring_get_emit_ib_size, | ||
866 | .get_dma_frame_size = vce_v3_0_ring_get_dma_frame_size, | ||
867 | }; | 850 | }; |
868 | 851 | ||
869 | static const struct amdgpu_ring_funcs vce_v3_0_ring_vm_funcs = { | 852 | static const struct amdgpu_ring_funcs vce_v3_0_ring_vm_funcs = { |
853 | .type = AMDGPU_RING_TYPE_VCE, | ||
854 | .align_mask = 0xf, | ||
855 | .nop = VCE_CMD_NO_OP, | ||
870 | .get_rptr = vce_v3_0_ring_get_rptr, | 856 | .get_rptr = vce_v3_0_ring_get_rptr, |
871 | .get_wptr = vce_v3_0_ring_get_wptr, | 857 | .get_wptr = vce_v3_0_ring_get_wptr, |
872 | .set_wptr = vce_v3_0_ring_set_wptr, | 858 | .set_wptr = vce_v3_0_ring_set_wptr, |
873 | .parse_cs = NULL, | 859 | .parse_cs = amdgpu_vce_ring_parse_cs_vm, |
860 | .emit_frame_size = | ||
861 | 6 + /* vce_v3_0_emit_vm_flush */ | ||
862 | 4 + /* vce_v3_0_emit_pipeline_sync */ | ||
863 | 6 + 6, /* amdgpu_vce_ring_emit_fence x2 vm fence */ | ||
864 | .emit_ib_size = 4, /* amdgpu_vce_ring_emit_ib */ | ||
874 | .emit_ib = vce_v3_0_ring_emit_ib, | 865 | .emit_ib = vce_v3_0_ring_emit_ib, |
875 | .emit_vm_flush = vce_v3_0_emit_vm_flush, | 866 | .emit_vm_flush = vce_v3_0_emit_vm_flush, |
876 | .emit_pipeline_sync = vce_v3_0_emit_pipeline_sync, | 867 | .emit_pipeline_sync = vce_v3_0_emit_pipeline_sync, |
@@ -881,8 +872,6 @@ static const struct amdgpu_ring_funcs vce_v3_0_ring_vm_funcs = { | |||
881 | .pad_ib = amdgpu_ring_generic_pad_ib, | 872 | .pad_ib = amdgpu_ring_generic_pad_ib, |
882 | .begin_use = amdgpu_vce_ring_begin_use, | 873 | .begin_use = amdgpu_vce_ring_begin_use, |
883 | .end_use = amdgpu_vce_ring_end_use, | 874 | .end_use = amdgpu_vce_ring_end_use, |
884 | .get_emit_ib_size = vce_v3_0_ring_get_emit_ib_size, | ||
885 | .get_dma_frame_size = vce_v3_0_ring_get_dma_frame_size_vm, | ||
886 | }; | 875 | }; |
887 | 876 | ||
888 | static void vce_v3_0_set_ring_funcs(struct amdgpu_device *adev) | 877 | static void vce_v3_0_set_ring_funcs(struct amdgpu_device *adev) |
@@ -910,3 +899,30 @@ static void vce_v3_0_set_irq_funcs(struct amdgpu_device *adev) | |||
910 | adev->vce.irq.num_types = 1; | 899 | adev->vce.irq.num_types = 1; |
911 | adev->vce.irq.funcs = &vce_v3_0_irq_funcs; | 900 | adev->vce.irq.funcs = &vce_v3_0_irq_funcs; |
912 | }; | 901 | }; |
902 | |||
903 | const struct amdgpu_ip_block_version vce_v3_0_ip_block = | ||
904 | { | ||
905 | .type = AMD_IP_BLOCK_TYPE_VCE, | ||
906 | .major = 3, | ||
907 | .minor = 0, | ||
908 | .rev = 0, | ||
909 | .funcs = &vce_v3_0_ip_funcs, | ||
910 | }; | ||
911 | |||
912 | const struct amdgpu_ip_block_version vce_v3_1_ip_block = | ||
913 | { | ||
914 | .type = AMD_IP_BLOCK_TYPE_VCE, | ||
915 | .major = 3, | ||
916 | .minor = 1, | ||
917 | .rev = 0, | ||
918 | .funcs = &vce_v3_0_ip_funcs, | ||
919 | }; | ||
920 | |||
921 | const struct amdgpu_ip_block_version vce_v3_4_ip_block = | ||
922 | { | ||
923 | .type = AMD_IP_BLOCK_TYPE_VCE, | ||
924 | .major = 3, | ||
925 | .minor = 4, | ||
926 | .rev = 0, | ||
927 | .funcs = &vce_v3_0_ip_funcs, | ||
928 | }; | ||
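
Besides gaining three versioned descriptors, the VM ring's parse_cs changes from NULL to amdgpu_vce_ring_parse_cs_vm, so VM-protected submissions presumably get a lighter validation pass while physical-mode rings keep the full patching parser. The selection idiom, sketched with assumed parser names:

    struct amdgpu_job;                      /* opaque for the sketch */
    typedef int (*cs_parse_fn)(struct amdgpu_job *job);

    extern int parse_cs_full(struct amdgpu_job *job);   /* assumed names */
    extern int parse_cs_vm(struct amdgpu_job *job);

    /* Physical-mode rings patch and validate every command stream;
     * VM-protected rings presumably need only the lighter checks. */
    static cs_parse_fn pick_parser(int vm_ring)
    {
        return vm_ring ? parse_cs_vm : parse_cs_full;
    }
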
diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.h b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.h index b45af65da81f..08b908c7de0f 100644 --- a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.h +++ b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.h | |||
@@ -24,6 +24,8 @@ | |||
24 | #ifndef __VCE_V3_0_H__ | 24 | #ifndef __VCE_V3_0_H__ |
25 | #define __VCE_V3_0_H__ | 25 | #define __VCE_V3_0_H__ |
26 | 26 | ||
27 | extern const struct amd_ip_funcs vce_v3_0_ip_funcs; | 27 | extern const struct amdgpu_ip_block_version vce_v3_0_ip_block; |
28 | extern const struct amdgpu_ip_block_version vce_v3_1_ip_block; | ||
29 | extern const struct amdgpu_ip_block_version vce_v3_4_ip_block; | ||
28 | 30 | ||
29 | #endif | 31 | #endif |
diff --git a/drivers/gpu/drm/amd/amdgpu/vi.c b/drivers/gpu/drm/amd/amdgpu/vi.c index c0d9aad7126f..25c0a71b257d 100644 --- a/drivers/gpu/drm/amd/amdgpu/vi.c +++ b/drivers/gpu/drm/amd/amdgpu/vi.c | |||
@@ -121,8 +121,8 @@ static u32 vi_smc_rreg(struct amdgpu_device *adev, u32 reg) | |||
121 | u32 r; | 121 | u32 r; |
122 | 122 | ||
123 | spin_lock_irqsave(&adev->smc_idx_lock, flags); | 123 | spin_lock_irqsave(&adev->smc_idx_lock, flags); |
124 | WREG32(mmSMC_IND_INDEX_0, (reg)); | 124 | WREG32(mmSMC_IND_INDEX_11, (reg)); |
125 | r = RREG32(mmSMC_IND_DATA_0); | 125 | r = RREG32(mmSMC_IND_DATA_11); |
126 | spin_unlock_irqrestore(&adev->smc_idx_lock, flags); | 126 | spin_unlock_irqrestore(&adev->smc_idx_lock, flags); |
127 | return r; | 127 | return r; |
128 | } | 128 | } |
@@ -132,8 +132,8 @@ static void vi_smc_wreg(struct amdgpu_device *adev, u32 reg, u32 v) | |||
132 | unsigned long flags; | 132 | unsigned long flags; |
133 | 133 | ||
134 | spin_lock_irqsave(&adev->smc_idx_lock, flags); | 134 | spin_lock_irqsave(&adev->smc_idx_lock, flags); |
135 | WREG32(mmSMC_IND_INDEX_0, (reg)); | 135 | WREG32(mmSMC_IND_INDEX_11, (reg)); |
136 | WREG32(mmSMC_IND_DATA_0, (v)); | 136 | WREG32(mmSMC_IND_DATA_11, (v)); |
137 | spin_unlock_irqrestore(&adev->smc_idx_lock, flags); | 137 | spin_unlock_irqrestore(&adev->smc_idx_lock, flags); |
138 | } | 138 | } |
139 | 139 | ||
@@ -437,12 +437,12 @@ static bool vi_read_bios_from_rom(struct amdgpu_device *adev, | |||
437 | /* take the smc lock since we are using the smc index */ | 437 | /* take the smc lock since we are using the smc index */ |
438 | spin_lock_irqsave(&adev->smc_idx_lock, flags); | 438 | spin_lock_irqsave(&adev->smc_idx_lock, flags); |
439 | /* set rom index to 0 */ | 439 | /* set rom index to 0 */ |
440 | WREG32(mmSMC_IND_INDEX_0, ixROM_INDEX); | 440 | WREG32(mmSMC_IND_INDEX_11, ixROM_INDEX); |
441 | WREG32(mmSMC_IND_DATA_0, 0); | 441 | WREG32(mmSMC_IND_DATA_11, 0); |
442 | /* set index to data for continuous read */ | 442 | /* set index to data for continuous read */ |
443 | WREG32(mmSMC_IND_INDEX_0, ixROM_DATA); | 443 | WREG32(mmSMC_IND_INDEX_11, ixROM_DATA); |
444 | for (i = 0; i < length_dw; i++) | 444 | for (i = 0; i < length_dw; i++) |
445 | dw_ptr[i] = RREG32(mmSMC_IND_DATA_0); | 445 | dw_ptr[i] = RREG32(mmSMC_IND_DATA_11); |
446 | spin_unlock_irqrestore(&adev->smc_idx_lock, flags); | 446 | spin_unlock_irqrestore(&adev->smc_idx_lock, flags); |
447 | 447 | ||
448 | return true; | 448 | return true; |
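
Every SMC indirect access in vi.c moves from the INDEX_0/DATA_0 pair to INDEX_11/DATA_11, presumably a pair reserved for the driver so it cannot race other agents using pair 0; smc_idx_lock still serializes the two-step index/data sequence. Generalizing the idiom shown in the hunks, a parameterized reader, callable as ind_rreg(adev, mmSMC_IND_INDEX_11, mmSMC_IND_DATA_11, reg); the helper itself is a sketch, not an existing amdgpu function:

    static u32 ind_rreg(struct amdgpu_device *adev, u32 index_reg,
                        u32 data_reg, u32 reg)
    {
        unsigned long flags;
        u32 r;

        spin_lock_irqsave(&adev->smc_idx_lock, flags);
        WREG32(index_reg, reg);     /* select the target register */
        r = RREG32(data_reg);       /* the data port hands back its value */
        spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
        return r;
    }
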
@@ -556,21 +556,100 @@ static const struct amdgpu_allowed_register_entry vi_allowed_read_registers[] = | |||
556 | {mmPA_SC_RASTER_CONFIG_1, false, true}, | 556 | {mmPA_SC_RASTER_CONFIG_1, false, true}, |
557 | }; | 557 | }; |
558 | 558 | ||
559 | static uint32_t vi_read_indexed_register(struct amdgpu_device *adev, u32 se_num, | 559 | static uint32_t vi_get_register_value(struct amdgpu_device *adev, |
560 | u32 sh_num, u32 reg_offset) | 560 | bool indexed, u32 se_num, |
561 | { | 561 | u32 sh_num, u32 reg_offset) |
562 | uint32_t val; | 562 | { |
563 | if (indexed) { | ||
564 | uint32_t val; | ||
565 | unsigned se_idx = (se_num == 0xffffffff) ? 0 : se_num; | ||
566 | unsigned sh_idx = (sh_num == 0xffffffff) ? 0 : sh_num; | ||
567 | |||
568 | switch (reg_offset) { | ||
569 | case mmCC_RB_BACKEND_DISABLE: | ||
570 | return adev->gfx.config.rb_config[se_idx][sh_idx].rb_backend_disable; | ||
571 | case mmGC_USER_RB_BACKEND_DISABLE: | ||
572 | return adev->gfx.config.rb_config[se_idx][sh_idx].user_rb_backend_disable; | ||
573 | case mmPA_SC_RASTER_CONFIG: | ||
574 | return adev->gfx.config.rb_config[se_idx][sh_idx].raster_config; | ||
575 | case mmPA_SC_RASTER_CONFIG_1: | ||
576 | return adev->gfx.config.rb_config[se_idx][sh_idx].raster_config_1; | ||
577 | } | ||
563 | 578 | ||
564 | mutex_lock(&adev->grbm_idx_mutex); | 579 | mutex_lock(&adev->grbm_idx_mutex); |
565 | if (se_num != 0xffffffff || sh_num != 0xffffffff) | 580 | if (se_num != 0xffffffff || sh_num != 0xffffffff) |
566 | amdgpu_gfx_select_se_sh(adev, se_num, sh_num, 0xffffffff); | 581 | amdgpu_gfx_select_se_sh(adev, se_num, sh_num, 0xffffffff); |
567 | 582 | ||
568 | val = RREG32(reg_offset); | 583 | val = RREG32(reg_offset); |
569 | 584 | ||
570 | if (se_num != 0xffffffff || sh_num != 0xffffffff) | 585 | if (se_num != 0xffffffff || sh_num != 0xffffffff) |
571 | amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff); | 586 | amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff); |
572 | mutex_unlock(&adev->grbm_idx_mutex); | 587 | mutex_unlock(&adev->grbm_idx_mutex); |
573 | return val; | 588 | return val; |
589 | } else { | ||
590 | unsigned idx; | ||
591 | |||
592 | switch (reg_offset) { | ||
593 | case mmGB_ADDR_CONFIG: | ||
594 | return adev->gfx.config.gb_addr_config; | ||
595 | case mmMC_ARB_RAMCFG: | ||
596 | return adev->gfx.config.mc_arb_ramcfg; | ||
597 | case mmGB_TILE_MODE0: | ||
598 | case mmGB_TILE_MODE1: | ||
599 | case mmGB_TILE_MODE2: | ||
600 | case mmGB_TILE_MODE3: | ||
601 | case mmGB_TILE_MODE4: | ||
602 | case mmGB_TILE_MODE5: | ||
603 | case mmGB_TILE_MODE6: | ||
604 | case mmGB_TILE_MODE7: | ||
605 | case mmGB_TILE_MODE8: | ||
606 | case mmGB_TILE_MODE9: | ||
607 | case mmGB_TILE_MODE10: | ||
608 | case mmGB_TILE_MODE11: | ||
609 | case mmGB_TILE_MODE12: | ||
610 | case mmGB_TILE_MODE13: | ||
611 | case mmGB_TILE_MODE14: | ||
612 | case mmGB_TILE_MODE15: | ||
613 | case mmGB_TILE_MODE16: | ||
614 | case mmGB_TILE_MODE17: | ||
615 | case mmGB_TILE_MODE18: | ||
616 | case mmGB_TILE_MODE19: | ||
617 | case mmGB_TILE_MODE20: | ||
618 | case mmGB_TILE_MODE21: | ||
619 | case mmGB_TILE_MODE22: | ||
620 | case mmGB_TILE_MODE23: | ||
621 | case mmGB_TILE_MODE24: | ||
622 | case mmGB_TILE_MODE25: | ||
623 | case mmGB_TILE_MODE26: | ||
624 | case mmGB_TILE_MODE27: | ||
625 | case mmGB_TILE_MODE28: | ||
626 | case mmGB_TILE_MODE29: | ||
627 | case mmGB_TILE_MODE30: | ||
628 | case mmGB_TILE_MODE31: | ||
629 | idx = (reg_offset - mmGB_TILE_MODE0); | ||
630 | return adev->gfx.config.tile_mode_array[idx]; | ||
631 | case mmGB_MACROTILE_MODE0: | ||
632 | case mmGB_MACROTILE_MODE1: | ||
633 | case mmGB_MACROTILE_MODE2: | ||
634 | case mmGB_MACROTILE_MODE3: | ||
635 | case mmGB_MACROTILE_MODE4: | ||
636 | case mmGB_MACROTILE_MODE5: | ||
637 | case mmGB_MACROTILE_MODE6: | ||
638 | case mmGB_MACROTILE_MODE7: | ||
639 | case mmGB_MACROTILE_MODE8: | ||
640 | case mmGB_MACROTILE_MODE9: | ||
641 | case mmGB_MACROTILE_MODE10: | ||
642 | case mmGB_MACROTILE_MODE11: | ||
643 | case mmGB_MACROTILE_MODE12: | ||
644 | case mmGB_MACROTILE_MODE13: | ||
645 | case mmGB_MACROTILE_MODE14: | ||
646 | case mmGB_MACROTILE_MODE15: | ||
647 | idx = (reg_offset - mmGB_MACROTILE_MODE0); | ||
648 | return adev->gfx.config.macrotile_mode_array[idx]; | ||
649 | default: | ||
650 | return RREG32(reg_offset); | ||
651 | } | ||
652 | } | ||
574 | } | 653 | } |
575 | 654 | ||
576 | static int vi_read_register(struct amdgpu_device *adev, u32 se_num, | 655 | static int vi_read_register(struct amdgpu_device *adev, u32 se_num, |
@@ -605,10 +684,9 @@ static int vi_read_register(struct amdgpu_device *adev, u32 se_num, | |||
605 | if (reg_offset != asic_register_entry->reg_offset) | 684 | if (reg_offset != asic_register_entry->reg_offset) |
606 | continue; | 685 | continue; |
607 | if (!asic_register_entry->untouched) | 686 | if (!asic_register_entry->untouched) |
608 | *value = asic_register_entry->grbm_indexed ? | 687 | *value = vi_get_register_value(adev, |
609 | vi_read_indexed_register(adev, se_num, | 688 | asic_register_entry->grbm_indexed, |
610 | sh_num, reg_offset) : | 689 | se_num, sh_num, reg_offset); |
611 | RREG32(reg_offset); | ||
612 | return 0; | 690 | return 0; |
613 | } | 691 | } |
614 | } | 692 | } |
@@ -618,10 +696,9 @@ static int vi_read_register(struct amdgpu_device *adev, u32 se_num, | |||
618 | continue; | 696 | continue; |
619 | 697 | ||
620 | if (!vi_allowed_read_registers[i].untouched) | 698 | if (!vi_allowed_read_registers[i].untouched) |
621 | *value = vi_allowed_read_registers[i].grbm_indexed ? | 699 | *value = vi_get_register_value(adev, |
622 | vi_read_indexed_register(adev, se_num, | 700 | vi_allowed_read_registers[i].grbm_indexed, |
623 | sh_num, reg_offset) : | 701 | se_num, sh_num, reg_offset); |
624 | RREG32(reg_offset); | ||
625 | return 0; | 702 | return 0; |
626 | } | 703 | } |
627 | return -EINVAL; | 704 | return -EINVAL; |
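
vi_read_register() now routes every allowed register through vi_get_register_value(), which answers GRBM-indexed queries (RB backend disables, raster configs) and the GFX configuration registers (GB_ADDR_CONFIG, MC_ARB_RAMCFG, tile and macrotile modes) from values cached in adev->gfx.config, only falling back to RREG32 for anything else; this keeps the query path off the hardware for data the driver already captured at init. The long case ladders reduce to a contiguous-offset-to-index mapping; a hypothetical bounds-checked equivalent for the tile modes (the helper name and RREG32 fallback are illustrative, not part of the patch):

        static uint32_t vi_cached_tile_mode(struct amdgpu_device *adev,
                                            u32 reg_offset)
        {
                u32 idx = reg_offset - mmGB_TILE_MODE0;

                /* mmGB_TILE_MODE0..31 are contiguous, which the
                 * 32-entry case ladder above relies on as well */
                if (idx < ARRAY_SIZE(adev->gfx.config.tile_mode_array))
                        return adev->gfx.config.tile_mode_array[idx];
                return RREG32(reg_offset);
        }
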
@@ -652,18 +729,6 @@ static int vi_gpu_pci_config_reset(struct amdgpu_device *adev) | |||
652 | return -EINVAL; | 729 | return -EINVAL; |
653 | } | 730 | } |
654 | 731 | ||
655 | static void vi_set_bios_scratch_engine_hung(struct amdgpu_device *adev, bool hung) | ||
656 | { | ||
657 | u32 tmp = RREG32(mmBIOS_SCRATCH_3); | ||
658 | |||
659 | if (hung) | ||
660 | tmp |= ATOM_S3_ASIC_GUI_ENGINE_HUNG; | ||
661 | else | ||
662 | tmp &= ~ATOM_S3_ASIC_GUI_ENGINE_HUNG; | ||
663 | |||
664 | WREG32(mmBIOS_SCRATCH_3, tmp); | ||
665 | } | ||
666 | |||
667 | /** | 732 | /** |
668 | * vi_asic_reset - soft reset GPU | 733 | * vi_asic_reset - soft reset GPU |
669 | * | 734 | * |
@@ -677,11 +742,11 @@ static int vi_asic_reset(struct amdgpu_device *adev) | |||
677 | { | 742 | { |
678 | int r; | 743 | int r; |
679 | 744 | ||
680 | vi_set_bios_scratch_engine_hung(adev, true); | 745 | amdgpu_atombios_scratch_regs_engine_hung(adev, true); |
681 | 746 | ||
682 | r = vi_gpu_pci_config_reset(adev); | 747 | r = vi_gpu_pci_config_reset(adev); |
683 | 748 | ||
684 | vi_set_bios_scratch_engine_hung(adev, false); | 749 | amdgpu_atombios_scratch_regs_engine_hung(adev, false); |
685 | 750 | ||
686 | return r; | 751 | return r; |
687 | } | 752 | } |
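
The VI-local vi_set_bios_scratch_engine_hung() removed above is replaced by a shared atombios helper, so each ASIC family no longer carries its own copy. Reconstructed from the removed body, the shared routine presumably does the same thing — flag the GUI engine as hung in BIOS scratch register 3 around the PCI config reset (the exact implementation lives in the common atombios code and may differ in detail):

        void amdgpu_atombios_scratch_regs_engine_hung(struct amdgpu_device *adev,
                                                      bool hung)
        {
                u32 tmp = RREG32(mmBIOS_SCRATCH_3);

                if (hung)
                        tmp |= ATOM_S3_ASIC_GUI_ENGINE_HUNG;
                else
                        tmp &= ~ATOM_S3_ASIC_GUI_ENGINE_HUNG;

                WREG32(mmBIOS_SCRATCH_3, tmp);
        }
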
@@ -781,734 +846,6 @@ static void vi_enable_doorbell_aperture(struct amdgpu_device *adev, | |||
781 | WREG32(mmBIF_DOORBELL_APER_EN, tmp); | 846 | WREG32(mmBIF_DOORBELL_APER_EN, tmp); |
782 | } | 847 | } |
783 | 848 | ||
784 | /* topaz has no DCE, UVD, VCE */ | ||
785 | static const struct amdgpu_ip_block_version topaz_ip_blocks[] = | ||
786 | { | ||
787 | /* ORDER MATTERS! */ | ||
788 | { | ||
789 | .type = AMD_IP_BLOCK_TYPE_COMMON, | ||
790 | .major = 2, | ||
791 | .minor = 0, | ||
792 | .rev = 0, | ||
793 | .funcs = &vi_common_ip_funcs, | ||
794 | }, | ||
795 | { | ||
796 | .type = AMD_IP_BLOCK_TYPE_GMC, | ||
797 | .major = 7, | ||
798 | .minor = 4, | ||
799 | .rev = 0, | ||
800 | .funcs = &gmc_v7_0_ip_funcs, | ||
801 | }, | ||
802 | { | ||
803 | .type = AMD_IP_BLOCK_TYPE_IH, | ||
804 | .major = 2, | ||
805 | .minor = 4, | ||
806 | .rev = 0, | ||
807 | .funcs = &iceland_ih_ip_funcs, | ||
808 | }, | ||
809 | { | ||
810 | .type = AMD_IP_BLOCK_TYPE_SMC, | ||
811 | .major = 7, | ||
812 | .minor = 1, | ||
813 | .rev = 0, | ||
814 | .funcs = &amdgpu_pp_ip_funcs, | ||
815 | }, | ||
816 | { | ||
817 | .type = AMD_IP_BLOCK_TYPE_GFX, | ||
818 | .major = 8, | ||
819 | .minor = 0, | ||
820 | .rev = 0, | ||
821 | .funcs = &gfx_v8_0_ip_funcs, | ||
822 | }, | ||
823 | { | ||
824 | .type = AMD_IP_BLOCK_TYPE_SDMA, | ||
825 | .major = 2, | ||
826 | .minor = 4, | ||
827 | .rev = 0, | ||
828 | .funcs = &sdma_v2_4_ip_funcs, | ||
829 | }, | ||
830 | }; | ||
831 | |||
832 | static const struct amdgpu_ip_block_version topaz_ip_blocks_vd[] = | ||
833 | { | ||
834 | /* ORDER MATTERS! */ | ||
835 | { | ||
836 | .type = AMD_IP_BLOCK_TYPE_COMMON, | ||
837 | .major = 2, | ||
838 | .minor = 0, | ||
839 | .rev = 0, | ||
840 | .funcs = &vi_common_ip_funcs, | ||
841 | }, | ||
842 | { | ||
843 | .type = AMD_IP_BLOCK_TYPE_GMC, | ||
844 | .major = 7, | ||
845 | .minor = 4, | ||
846 | .rev = 0, | ||
847 | .funcs = &gmc_v7_0_ip_funcs, | ||
848 | }, | ||
849 | { | ||
850 | .type = AMD_IP_BLOCK_TYPE_IH, | ||
851 | .major = 2, | ||
852 | .minor = 4, | ||
853 | .rev = 0, | ||
854 | .funcs = &iceland_ih_ip_funcs, | ||
855 | }, | ||
856 | { | ||
857 | .type = AMD_IP_BLOCK_TYPE_SMC, | ||
858 | .major = 7, | ||
859 | .minor = 1, | ||
860 | .rev = 0, | ||
861 | .funcs = &amdgpu_pp_ip_funcs, | ||
862 | }, | ||
863 | { | ||
864 | .type = AMD_IP_BLOCK_TYPE_DCE, | ||
865 | .major = 1, | ||
866 | .minor = 0, | ||
867 | .rev = 0, | ||
868 | .funcs = &dce_virtual_ip_funcs, | ||
869 | }, | ||
870 | { | ||
871 | .type = AMD_IP_BLOCK_TYPE_GFX, | ||
872 | .major = 8, | ||
873 | .minor = 0, | ||
874 | .rev = 0, | ||
875 | .funcs = &gfx_v8_0_ip_funcs, | ||
876 | }, | ||
877 | { | ||
878 | .type = AMD_IP_BLOCK_TYPE_SDMA, | ||
879 | .major = 2, | ||
880 | .minor = 4, | ||
881 | .rev = 0, | ||
882 | .funcs = &sdma_v2_4_ip_funcs, | ||
883 | }, | ||
884 | }; | ||
885 | |||
886 | static const struct amdgpu_ip_block_version tonga_ip_blocks[] = | ||
887 | { | ||
888 | /* ORDER MATTERS! */ | ||
889 | { | ||
890 | .type = AMD_IP_BLOCK_TYPE_COMMON, | ||
891 | .major = 2, | ||
892 | .minor = 0, | ||
893 | .rev = 0, | ||
894 | .funcs = &vi_common_ip_funcs, | ||
895 | }, | ||
896 | { | ||
897 | .type = AMD_IP_BLOCK_TYPE_GMC, | ||
898 | .major = 8, | ||
899 | .minor = 0, | ||
900 | .rev = 0, | ||
901 | .funcs = &gmc_v8_0_ip_funcs, | ||
902 | }, | ||
903 | { | ||
904 | .type = AMD_IP_BLOCK_TYPE_IH, | ||
905 | .major = 3, | ||
906 | .minor = 0, | ||
907 | .rev = 0, | ||
908 | .funcs = &tonga_ih_ip_funcs, | ||
909 | }, | ||
910 | { | ||
911 | .type = AMD_IP_BLOCK_TYPE_SMC, | ||
912 | .major = 7, | ||
913 | .minor = 1, | ||
914 | .rev = 0, | ||
915 | .funcs = &amdgpu_pp_ip_funcs, | ||
916 | }, | ||
917 | { | ||
918 | .type = AMD_IP_BLOCK_TYPE_DCE, | ||
919 | .major = 10, | ||
920 | .minor = 0, | ||
921 | .rev = 0, | ||
922 | .funcs = &dce_v10_0_ip_funcs, | ||
923 | }, | ||
924 | { | ||
925 | .type = AMD_IP_BLOCK_TYPE_GFX, | ||
926 | .major = 8, | ||
927 | .minor = 0, | ||
928 | .rev = 0, | ||
929 | .funcs = &gfx_v8_0_ip_funcs, | ||
930 | }, | ||
931 | { | ||
932 | .type = AMD_IP_BLOCK_TYPE_SDMA, | ||
933 | .major = 3, | ||
934 | .minor = 0, | ||
935 | .rev = 0, | ||
936 | .funcs = &sdma_v3_0_ip_funcs, | ||
937 | }, | ||
938 | { | ||
939 | .type = AMD_IP_BLOCK_TYPE_UVD, | ||
940 | .major = 5, | ||
941 | .minor = 0, | ||
942 | .rev = 0, | ||
943 | .funcs = &uvd_v5_0_ip_funcs, | ||
944 | }, | ||
945 | { | ||
946 | .type = AMD_IP_BLOCK_TYPE_VCE, | ||
947 | .major = 3, | ||
948 | .minor = 0, | ||
949 | .rev = 0, | ||
950 | .funcs = &vce_v3_0_ip_funcs, | ||
951 | }, | ||
952 | }; | ||
953 | |||
954 | static const struct amdgpu_ip_block_version tonga_ip_blocks_vd[] = | ||
955 | { | ||
956 | /* ORDER MATTERS! */ | ||
957 | { | ||
958 | .type = AMD_IP_BLOCK_TYPE_COMMON, | ||
959 | .major = 2, | ||
960 | .minor = 0, | ||
961 | .rev = 0, | ||
962 | .funcs = &vi_common_ip_funcs, | ||
963 | }, | ||
964 | { | ||
965 | .type = AMD_IP_BLOCK_TYPE_GMC, | ||
966 | .major = 8, | ||
967 | .minor = 0, | ||
968 | .rev = 0, | ||
969 | .funcs = &gmc_v8_0_ip_funcs, | ||
970 | }, | ||
971 | { | ||
972 | .type = AMD_IP_BLOCK_TYPE_IH, | ||
973 | .major = 3, | ||
974 | .minor = 0, | ||
975 | .rev = 0, | ||
976 | .funcs = &tonga_ih_ip_funcs, | ||
977 | }, | ||
978 | { | ||
979 | .type = AMD_IP_BLOCK_TYPE_SMC, | ||
980 | .major = 7, | ||
981 | .minor = 1, | ||
982 | .rev = 0, | ||
983 | .funcs = &amdgpu_pp_ip_funcs, | ||
984 | }, | ||
985 | { | ||
986 | .type = AMD_IP_BLOCK_TYPE_DCE, | ||
987 | .major = 10, | ||
988 | .minor = 0, | ||
989 | .rev = 0, | ||
990 | .funcs = &dce_virtual_ip_funcs, | ||
991 | }, | ||
992 | { | ||
993 | .type = AMD_IP_BLOCK_TYPE_GFX, | ||
994 | .major = 8, | ||
995 | .minor = 0, | ||
996 | .rev = 0, | ||
997 | .funcs = &gfx_v8_0_ip_funcs, | ||
998 | }, | ||
999 | { | ||
1000 | .type = AMD_IP_BLOCK_TYPE_SDMA, | ||
1001 | .major = 3, | ||
1002 | .minor = 0, | ||
1003 | .rev = 0, | ||
1004 | .funcs = &sdma_v3_0_ip_funcs, | ||
1005 | }, | ||
1006 | { | ||
1007 | .type = AMD_IP_BLOCK_TYPE_UVD, | ||
1008 | .major = 5, | ||
1009 | .minor = 0, | ||
1010 | .rev = 0, | ||
1011 | .funcs = &uvd_v5_0_ip_funcs, | ||
1012 | }, | ||
1013 | { | ||
1014 | .type = AMD_IP_BLOCK_TYPE_VCE, | ||
1015 | .major = 3, | ||
1016 | .minor = 0, | ||
1017 | .rev = 0, | ||
1018 | .funcs = &vce_v3_0_ip_funcs, | ||
1019 | }, | ||
1020 | }; | ||
1021 | |||
1022 | static const struct amdgpu_ip_block_version fiji_ip_blocks[] = | ||
1023 | { | ||
1024 | /* ORDER MATTERS! */ | ||
1025 | { | ||
1026 | .type = AMD_IP_BLOCK_TYPE_COMMON, | ||
1027 | .major = 2, | ||
1028 | .minor = 0, | ||
1029 | .rev = 0, | ||
1030 | .funcs = &vi_common_ip_funcs, | ||
1031 | }, | ||
1032 | { | ||
1033 | .type = AMD_IP_BLOCK_TYPE_GMC, | ||
1034 | .major = 8, | ||
1035 | .minor = 5, | ||
1036 | .rev = 0, | ||
1037 | .funcs = &gmc_v8_0_ip_funcs, | ||
1038 | }, | ||
1039 | { | ||
1040 | .type = AMD_IP_BLOCK_TYPE_IH, | ||
1041 | .major = 3, | ||
1042 | .minor = 0, | ||
1043 | .rev = 0, | ||
1044 | .funcs = &tonga_ih_ip_funcs, | ||
1045 | }, | ||
1046 | { | ||
1047 | .type = AMD_IP_BLOCK_TYPE_SMC, | ||
1048 | .major = 7, | ||
1049 | .minor = 1, | ||
1050 | .rev = 0, | ||
1051 | .funcs = &amdgpu_pp_ip_funcs, | ||
1052 | }, | ||
1053 | { | ||
1054 | .type = AMD_IP_BLOCK_TYPE_DCE, | ||
1055 | .major = 10, | ||
1056 | .minor = 1, | ||
1057 | .rev = 0, | ||
1058 | .funcs = &dce_v10_0_ip_funcs, | ||
1059 | }, | ||
1060 | { | ||
1061 | .type = AMD_IP_BLOCK_TYPE_GFX, | ||
1062 | .major = 8, | ||
1063 | .minor = 0, | ||
1064 | .rev = 0, | ||
1065 | .funcs = &gfx_v8_0_ip_funcs, | ||
1066 | }, | ||
1067 | { | ||
1068 | .type = AMD_IP_BLOCK_TYPE_SDMA, | ||
1069 | .major = 3, | ||
1070 | .minor = 0, | ||
1071 | .rev = 0, | ||
1072 | .funcs = &sdma_v3_0_ip_funcs, | ||
1073 | }, | ||
1074 | { | ||
1075 | .type = AMD_IP_BLOCK_TYPE_UVD, | ||
1076 | .major = 6, | ||
1077 | .minor = 0, | ||
1078 | .rev = 0, | ||
1079 | .funcs = &uvd_v6_0_ip_funcs, | ||
1080 | }, | ||
1081 | { | ||
1082 | .type = AMD_IP_BLOCK_TYPE_VCE, | ||
1083 | .major = 3, | ||
1084 | .minor = 0, | ||
1085 | .rev = 0, | ||
1086 | .funcs = &vce_v3_0_ip_funcs, | ||
1087 | }, | ||
1088 | }; | ||
1089 | |||
1090 | static const struct amdgpu_ip_block_version fiji_ip_blocks_vd[] = | ||
1091 | { | ||
1092 | /* ORDER MATTERS! */ | ||
1093 | { | ||
1094 | .type = AMD_IP_BLOCK_TYPE_COMMON, | ||
1095 | .major = 2, | ||
1096 | .minor = 0, | ||
1097 | .rev = 0, | ||
1098 | .funcs = &vi_common_ip_funcs, | ||
1099 | }, | ||
1100 | { | ||
1101 | .type = AMD_IP_BLOCK_TYPE_GMC, | ||
1102 | .major = 8, | ||
1103 | .minor = 5, | ||
1104 | .rev = 0, | ||
1105 | .funcs = &gmc_v8_0_ip_funcs, | ||
1106 | }, | ||
1107 | { | ||
1108 | .type = AMD_IP_BLOCK_TYPE_IH, | ||
1109 | .major = 3, | ||
1110 | .minor = 0, | ||
1111 | .rev = 0, | ||
1112 | .funcs = &tonga_ih_ip_funcs, | ||
1113 | }, | ||
1114 | { | ||
1115 | .type = AMD_IP_BLOCK_TYPE_SMC, | ||
1116 | .major = 7, | ||
1117 | .minor = 1, | ||
1118 | .rev = 0, | ||
1119 | .funcs = &amdgpu_pp_ip_funcs, | ||
1120 | }, | ||
1121 | { | ||
1122 | .type = AMD_IP_BLOCK_TYPE_DCE, | ||
1123 | .major = 10, | ||
1124 | .minor = 1, | ||
1125 | .rev = 0, | ||
1126 | .funcs = &dce_virtual_ip_funcs, | ||
1127 | }, | ||
1128 | { | ||
1129 | .type = AMD_IP_BLOCK_TYPE_GFX, | ||
1130 | .major = 8, | ||
1131 | .minor = 0, | ||
1132 | .rev = 0, | ||
1133 | .funcs = &gfx_v8_0_ip_funcs, | ||
1134 | }, | ||
1135 | { | ||
1136 | .type = AMD_IP_BLOCK_TYPE_SDMA, | ||
1137 | .major = 3, | ||
1138 | .minor = 0, | ||
1139 | .rev = 0, | ||
1140 | .funcs = &sdma_v3_0_ip_funcs, | ||
1141 | }, | ||
1142 | { | ||
1143 | .type = AMD_IP_BLOCK_TYPE_UVD, | ||
1144 | .major = 6, | ||
1145 | .minor = 0, | ||
1146 | .rev = 0, | ||
1147 | .funcs = &uvd_v6_0_ip_funcs, | ||
1148 | }, | ||
1149 | { | ||
1150 | .type = AMD_IP_BLOCK_TYPE_VCE, | ||
1151 | .major = 3, | ||
1152 | .minor = 0, | ||
1153 | .rev = 0, | ||
1154 | .funcs = &vce_v3_0_ip_funcs, | ||
1155 | }, | ||
1156 | }; | ||
1157 | |||
1158 | static const struct amdgpu_ip_block_version polaris11_ip_blocks[] = | ||
1159 | { | ||
1160 | /* ORDER MATTERS! */ | ||
1161 | { | ||
1162 | .type = AMD_IP_BLOCK_TYPE_COMMON, | ||
1163 | .major = 2, | ||
1164 | .minor = 0, | ||
1165 | .rev = 0, | ||
1166 | .funcs = &vi_common_ip_funcs, | ||
1167 | }, | ||
1168 | { | ||
1169 | .type = AMD_IP_BLOCK_TYPE_GMC, | ||
1170 | .major = 8, | ||
1171 | .minor = 1, | ||
1172 | .rev = 0, | ||
1173 | .funcs = &gmc_v8_0_ip_funcs, | ||
1174 | }, | ||
1175 | { | ||
1176 | .type = AMD_IP_BLOCK_TYPE_IH, | ||
1177 | .major = 3, | ||
1178 | .minor = 1, | ||
1179 | .rev = 0, | ||
1180 | .funcs = &tonga_ih_ip_funcs, | ||
1181 | }, | ||
1182 | { | ||
1183 | .type = AMD_IP_BLOCK_TYPE_SMC, | ||
1184 | .major = 7, | ||
1185 | .minor = 2, | ||
1186 | .rev = 0, | ||
1187 | .funcs = &amdgpu_pp_ip_funcs, | ||
1188 | }, | ||
1189 | { | ||
1190 | .type = AMD_IP_BLOCK_TYPE_DCE, | ||
1191 | .major = 11, | ||
1192 | .minor = 2, | ||
1193 | .rev = 0, | ||
1194 | .funcs = &dce_v11_0_ip_funcs, | ||
1195 | }, | ||
1196 | { | ||
1197 | .type = AMD_IP_BLOCK_TYPE_GFX, | ||
1198 | .major = 8, | ||
1199 | .minor = 0, | ||
1200 | .rev = 0, | ||
1201 | .funcs = &gfx_v8_0_ip_funcs, | ||
1202 | }, | ||
1203 | { | ||
1204 | .type = AMD_IP_BLOCK_TYPE_SDMA, | ||
1205 | .major = 3, | ||
1206 | .minor = 1, | ||
1207 | .rev = 0, | ||
1208 | .funcs = &sdma_v3_0_ip_funcs, | ||
1209 | }, | ||
1210 | { | ||
1211 | .type = AMD_IP_BLOCK_TYPE_UVD, | ||
1212 | .major = 6, | ||
1213 | .minor = 3, | ||
1214 | .rev = 0, | ||
1215 | .funcs = &uvd_v6_0_ip_funcs, | ||
1216 | }, | ||
1217 | { | ||
1218 | .type = AMD_IP_BLOCK_TYPE_VCE, | ||
1219 | .major = 3, | ||
1220 | .minor = 4, | ||
1221 | .rev = 0, | ||
1222 | .funcs = &vce_v3_0_ip_funcs, | ||
1223 | }, | ||
1224 | }; | ||
1225 | |||
1226 | static const struct amdgpu_ip_block_version polaris11_ip_blocks_vd[] = | ||
1227 | { | ||
1228 | /* ORDER MATTERS! */ | ||
1229 | { | ||
1230 | .type = AMD_IP_BLOCK_TYPE_COMMON, | ||
1231 | .major = 2, | ||
1232 | .minor = 0, | ||
1233 | .rev = 0, | ||
1234 | .funcs = &vi_common_ip_funcs, | ||
1235 | }, | ||
1236 | { | ||
1237 | .type = AMD_IP_BLOCK_TYPE_GMC, | ||
1238 | .major = 8, | ||
1239 | .minor = 1, | ||
1240 | .rev = 0, | ||
1241 | .funcs = &gmc_v8_0_ip_funcs, | ||
1242 | }, | ||
1243 | { | ||
1244 | .type = AMD_IP_BLOCK_TYPE_IH, | ||
1245 | .major = 3, | ||
1246 | .minor = 1, | ||
1247 | .rev = 0, | ||
1248 | .funcs = &tonga_ih_ip_funcs, | ||
1249 | }, | ||
1250 | { | ||
1251 | .type = AMD_IP_BLOCK_TYPE_SMC, | ||
1252 | .major = 7, | ||
1253 | .minor = 2, | ||
1254 | .rev = 0, | ||
1255 | .funcs = &amdgpu_pp_ip_funcs, | ||
1256 | }, | ||
1257 | { | ||
1258 | .type = AMD_IP_BLOCK_TYPE_DCE, | ||
1259 | .major = 11, | ||
1260 | .minor = 2, | ||
1261 | .rev = 0, | ||
1262 | .funcs = &dce_virtual_ip_funcs, | ||
1263 | }, | ||
1264 | { | ||
1265 | .type = AMD_IP_BLOCK_TYPE_GFX, | ||
1266 | .major = 8, | ||
1267 | .minor = 0, | ||
1268 | .rev = 0, | ||
1269 | .funcs = &gfx_v8_0_ip_funcs, | ||
1270 | }, | ||
1271 | { | ||
1272 | .type = AMD_IP_BLOCK_TYPE_SDMA, | ||
1273 | .major = 3, | ||
1274 | .minor = 1, | ||
1275 | .rev = 0, | ||
1276 | .funcs = &sdma_v3_0_ip_funcs, | ||
1277 | }, | ||
1278 | { | ||
1279 | .type = AMD_IP_BLOCK_TYPE_UVD, | ||
1280 | .major = 6, | ||
1281 | .minor = 3, | ||
1282 | .rev = 0, | ||
1283 | .funcs = &uvd_v6_0_ip_funcs, | ||
1284 | }, | ||
1285 | { | ||
1286 | .type = AMD_IP_BLOCK_TYPE_VCE, | ||
1287 | .major = 3, | ||
1288 | .minor = 4, | ||
1289 | .rev = 0, | ||
1290 | .funcs = &vce_v3_0_ip_funcs, | ||
1291 | }, | ||
1292 | }; | ||
1293 | |||
1294 | static const struct amdgpu_ip_block_version cz_ip_blocks[] = | ||
1295 | { | ||
1296 | /* ORDER MATTERS! */ | ||
1297 | { | ||
1298 | .type = AMD_IP_BLOCK_TYPE_COMMON, | ||
1299 | .major = 2, | ||
1300 | .minor = 0, | ||
1301 | .rev = 0, | ||
1302 | .funcs = &vi_common_ip_funcs, | ||
1303 | }, | ||
1304 | { | ||
1305 | .type = AMD_IP_BLOCK_TYPE_GMC, | ||
1306 | .major = 8, | ||
1307 | .minor = 0, | ||
1308 | .rev = 0, | ||
1309 | .funcs = &gmc_v8_0_ip_funcs, | ||
1310 | }, | ||
1311 | { | ||
1312 | .type = AMD_IP_BLOCK_TYPE_IH, | ||
1313 | .major = 3, | ||
1314 | .minor = 0, | ||
1315 | .rev = 0, | ||
1316 | .funcs = &cz_ih_ip_funcs, | ||
1317 | }, | ||
1318 | { | ||
1319 | .type = AMD_IP_BLOCK_TYPE_SMC, | ||
1320 | .major = 8, | ||
1321 | .minor = 0, | ||
1322 | .rev = 0, | ||
1323 | .funcs = &amdgpu_pp_ip_funcs | ||
1324 | }, | ||
1325 | { | ||
1326 | .type = AMD_IP_BLOCK_TYPE_DCE, | ||
1327 | .major = 11, | ||
1328 | .minor = 0, | ||
1329 | .rev = 0, | ||
1330 | .funcs = &dce_v11_0_ip_funcs, | ||
1331 | }, | ||
1332 | { | ||
1333 | .type = AMD_IP_BLOCK_TYPE_GFX, | ||
1334 | .major = 8, | ||
1335 | .minor = 0, | ||
1336 | .rev = 0, | ||
1337 | .funcs = &gfx_v8_0_ip_funcs, | ||
1338 | }, | ||
1339 | { | ||
1340 | .type = AMD_IP_BLOCK_TYPE_SDMA, | ||
1341 | .major = 3, | ||
1342 | .minor = 0, | ||
1343 | .rev = 0, | ||
1344 | .funcs = &sdma_v3_0_ip_funcs, | ||
1345 | }, | ||
1346 | { | ||
1347 | .type = AMD_IP_BLOCK_TYPE_UVD, | ||
1348 | .major = 6, | ||
1349 | .minor = 0, | ||
1350 | .rev = 0, | ||
1351 | .funcs = &uvd_v6_0_ip_funcs, | ||
1352 | }, | ||
1353 | { | ||
1354 | .type = AMD_IP_BLOCK_TYPE_VCE, | ||
1355 | .major = 3, | ||
1356 | .minor = 0, | ||
1357 | .rev = 0, | ||
1358 | .funcs = &vce_v3_0_ip_funcs, | ||
1359 | }, | ||
1360 | #if defined(CONFIG_DRM_AMD_ACP) | ||
1361 | { | ||
1362 | .type = AMD_IP_BLOCK_TYPE_ACP, | ||
1363 | .major = 2, | ||
1364 | .minor = 2, | ||
1365 | .rev = 0, | ||
1366 | .funcs = &acp_ip_funcs, | ||
1367 | }, | ||
1368 | #endif | ||
1369 | }; | ||
1370 | |||
1371 | static const struct amdgpu_ip_block_version cz_ip_blocks_vd[] = | ||
1372 | { | ||
1373 | /* ORDER MATTERS! */ | ||
1374 | { | ||
1375 | .type = AMD_IP_BLOCK_TYPE_COMMON, | ||
1376 | .major = 2, | ||
1377 | .minor = 0, | ||
1378 | .rev = 0, | ||
1379 | .funcs = &vi_common_ip_funcs, | ||
1380 | }, | ||
1381 | { | ||
1382 | .type = AMD_IP_BLOCK_TYPE_GMC, | ||
1383 | .major = 8, | ||
1384 | .minor = 0, | ||
1385 | .rev = 0, | ||
1386 | .funcs = &gmc_v8_0_ip_funcs, | ||
1387 | }, | ||
1388 | { | ||
1389 | .type = AMD_IP_BLOCK_TYPE_IH, | ||
1390 | .major = 3, | ||
1391 | .minor = 0, | ||
1392 | .rev = 0, | ||
1393 | .funcs = &cz_ih_ip_funcs, | ||
1394 | }, | ||
1395 | { | ||
1396 | .type = AMD_IP_BLOCK_TYPE_SMC, | ||
1397 | .major = 8, | ||
1398 | .minor = 0, | ||
1399 | .rev = 0, | ||
1400 | .funcs = &amdgpu_pp_ip_funcs | ||
1401 | }, | ||
1402 | { | ||
1403 | .type = AMD_IP_BLOCK_TYPE_DCE, | ||
1404 | .major = 11, | ||
1405 | .minor = 0, | ||
1406 | .rev = 0, | ||
1407 | .funcs = &dce_virtual_ip_funcs, | ||
1408 | }, | ||
1409 | { | ||
1410 | .type = AMD_IP_BLOCK_TYPE_GFX, | ||
1411 | .major = 8, | ||
1412 | .minor = 0, | ||
1413 | .rev = 0, | ||
1414 | .funcs = &gfx_v8_0_ip_funcs, | ||
1415 | }, | ||
1416 | { | ||
1417 | .type = AMD_IP_BLOCK_TYPE_SDMA, | ||
1418 | .major = 3, | ||
1419 | .minor = 0, | ||
1420 | .rev = 0, | ||
1421 | .funcs = &sdma_v3_0_ip_funcs, | ||
1422 | }, | ||
1423 | { | ||
1424 | .type = AMD_IP_BLOCK_TYPE_UVD, | ||
1425 | .major = 6, | ||
1426 | .minor = 0, | ||
1427 | .rev = 0, | ||
1428 | .funcs = &uvd_v6_0_ip_funcs, | ||
1429 | }, | ||
1430 | { | ||
1431 | .type = AMD_IP_BLOCK_TYPE_VCE, | ||
1432 | .major = 3, | ||
1433 | .minor = 0, | ||
1434 | .rev = 0, | ||
1435 | .funcs = &vce_v3_0_ip_funcs, | ||
1436 | }, | ||
1437 | #if defined(CONFIG_DRM_AMD_ACP) | ||
1438 | { | ||
1439 | .type = AMD_IP_BLOCK_TYPE_ACP, | ||
1440 | .major = 2, | ||
1441 | .minor = 2, | ||
1442 | .rev = 0, | ||
1443 | .funcs = &acp_ip_funcs, | ||
1444 | }, | ||
1445 | #endif | ||
1446 | }; | ||
1447 | |||
1448 | int vi_set_ip_blocks(struct amdgpu_device *adev) | ||
1449 | { | ||
1450 | if (adev->enable_virtual_display) { | ||
1451 | switch (adev->asic_type) { | ||
1452 | case CHIP_TOPAZ: | ||
1453 | adev->ip_blocks = topaz_ip_blocks_vd; | ||
1454 | adev->num_ip_blocks = ARRAY_SIZE(topaz_ip_blocks_vd); | ||
1455 | break; | ||
1456 | case CHIP_FIJI: | ||
1457 | adev->ip_blocks = fiji_ip_blocks_vd; | ||
1458 | adev->num_ip_blocks = ARRAY_SIZE(fiji_ip_blocks_vd); | ||
1459 | break; | ||
1460 | case CHIP_TONGA: | ||
1461 | adev->ip_blocks = tonga_ip_blocks_vd; | ||
1462 | adev->num_ip_blocks = ARRAY_SIZE(tonga_ip_blocks_vd); | ||
1463 | break; | ||
1464 | case CHIP_POLARIS11: | ||
1465 | case CHIP_POLARIS10: | ||
1466 | adev->ip_blocks = polaris11_ip_blocks_vd; | ||
1467 | adev->num_ip_blocks = ARRAY_SIZE(polaris11_ip_blocks_vd); | ||
1468 | break; | ||
1469 | |||
1470 | case CHIP_CARRIZO: | ||
1471 | case CHIP_STONEY: | ||
1472 | adev->ip_blocks = cz_ip_blocks_vd; | ||
1473 | adev->num_ip_blocks = ARRAY_SIZE(cz_ip_blocks_vd); | ||
1474 | break; | ||
1475 | default: | ||
1476 | /* FIXME: not supported yet */ | ||
1477 | return -EINVAL; | ||
1478 | } | ||
1479 | } else { | ||
1480 | switch (adev->asic_type) { | ||
1481 | case CHIP_TOPAZ: | ||
1482 | adev->ip_blocks = topaz_ip_blocks; | ||
1483 | adev->num_ip_blocks = ARRAY_SIZE(topaz_ip_blocks); | ||
1484 | break; | ||
1485 | case CHIP_FIJI: | ||
1486 | adev->ip_blocks = fiji_ip_blocks; | ||
1487 | adev->num_ip_blocks = ARRAY_SIZE(fiji_ip_blocks); | ||
1488 | break; | ||
1489 | case CHIP_TONGA: | ||
1490 | adev->ip_blocks = tonga_ip_blocks; | ||
1491 | adev->num_ip_blocks = ARRAY_SIZE(tonga_ip_blocks); | ||
1492 | break; | ||
1493 | case CHIP_POLARIS11: | ||
1494 | case CHIP_POLARIS10: | ||
1495 | adev->ip_blocks = polaris11_ip_blocks; | ||
1496 | adev->num_ip_blocks = ARRAY_SIZE(polaris11_ip_blocks); | ||
1497 | break; | ||
1498 | case CHIP_CARRIZO: | ||
1499 | case CHIP_STONEY: | ||
1500 | adev->ip_blocks = cz_ip_blocks; | ||
1501 | adev->num_ip_blocks = ARRAY_SIZE(cz_ip_blocks); | ||
1502 | break; | ||
1503 | default: | ||
1504 | /* FIXME: not supported yet */ | ||
1505 | return -EINVAL; | ||
1506 | } | ||
1507 | } | ||
1508 | |||
1509 | return 0; | ||
1510 | } | ||
1511 | |||
1512 | #define ATI_REV_ID_FUSE_MACRO__ADDRESS 0xC0014044 | 849 | #define ATI_REV_ID_FUSE_MACRO__ADDRESS 0xC0014044 |
1513 | #define ATI_REV_ID_FUSE_MACRO__SHIFT 9 | 850 | #define ATI_REV_ID_FUSE_MACRO__SHIFT 9 |
1514 | #define ATI_REV_ID_FUSE_MACRO__MASK 0x00001E00 | 851 | #define ATI_REV_ID_FUSE_MACRO__MASK 0x00001E00 |
@@ -1593,7 +930,7 @@ static int vi_common_early_init(void *handle) | |||
1593 | break; | 930 | break; |
1594 | case CHIP_TONGA: | 931 | case CHIP_TONGA: |
1595 | adev->cg_flags = AMD_CG_SUPPORT_UVD_MGCG; | 932 | adev->cg_flags = AMD_CG_SUPPORT_UVD_MGCG; |
1596 | adev->pg_flags = 0; | 933 | adev->pg_flags = AMD_PG_SUPPORT_UVD; |
1597 | adev->external_rev_id = adev->rev_id + 0x14; | 934 | adev->external_rev_id = adev->rev_id + 0x14; |
1598 | break; | 935 | break; |
1599 | case CHIP_POLARIS11: | 936 | case CHIP_POLARIS11: |
@@ -1908,7 +1245,7 @@ static int vi_common_set_powergating_state(void *handle, | |||
1908 | return 0; | 1245 | return 0; |
1909 | } | 1246 | } |
1910 | 1247 | ||
1911 | const struct amd_ip_funcs vi_common_ip_funcs = { | 1248 | static const struct amd_ip_funcs vi_common_ip_funcs = { |
1912 | .name = "vi_common", | 1249 | .name = "vi_common", |
1913 | .early_init = vi_common_early_init, | 1250 | .early_init = vi_common_early_init, |
1914 | .late_init = NULL, | 1251 | .late_init = NULL, |
@@ -1925,3 +1262,110 @@ const struct amd_ip_funcs vi_common_ip_funcs = { | |||
1925 | .set_powergating_state = vi_common_set_powergating_state, | 1262 | .set_powergating_state = vi_common_set_powergating_state, |
1926 | }; | 1263 | }; |
1927 | 1264 | ||
1265 | static const struct amdgpu_ip_block_version vi_common_ip_block = | ||
1266 | { | ||
1267 | .type = AMD_IP_BLOCK_TYPE_COMMON, | ||
1268 | .major = 1, | ||
1269 | .minor = 0, | ||
1270 | .rev = 0, | ||
1271 | .funcs = &vi_common_ip_funcs, | ||
1272 | }; | ||
1273 | |||
1274 | int vi_set_ip_blocks(struct amdgpu_device *adev) | ||
1275 | { | ||
1276 | switch (adev->asic_type) { | ||
1277 | case CHIP_TOPAZ: | ||
1278 | /* topaz has no DCE, UVD, VCE */ | ||
1279 | amdgpu_ip_block_add(adev, &vi_common_ip_block); | ||
1280 | amdgpu_ip_block_add(adev, &gmc_v7_4_ip_block); | ||
1281 | amdgpu_ip_block_add(adev, &iceland_ih_ip_block); | ||
1282 | amdgpu_ip_block_add(adev, &amdgpu_pp_ip_block); | ||
1283 | if (adev->enable_virtual_display) | ||
1284 | amdgpu_ip_block_add(adev, &dce_virtual_ip_block); | ||
1285 | amdgpu_ip_block_add(adev, &gfx_v8_0_ip_block); | ||
1286 | amdgpu_ip_block_add(adev, &sdma_v2_4_ip_block); | ||
1287 | break; | ||
1288 | case CHIP_FIJI: | ||
1289 | amdgpu_ip_block_add(adev, &vi_common_ip_block); | ||
1290 | amdgpu_ip_block_add(adev, &gmc_v8_5_ip_block); | ||
1291 | amdgpu_ip_block_add(adev, &tonga_ih_ip_block); | ||
1292 | amdgpu_ip_block_add(adev, &amdgpu_pp_ip_block); | ||
1293 | if (adev->enable_virtual_display) | ||
1294 | amdgpu_ip_block_add(adev, &dce_virtual_ip_block); | ||
1295 | else | ||
1296 | amdgpu_ip_block_add(adev, &dce_v10_1_ip_block); | ||
1297 | amdgpu_ip_block_add(adev, &gfx_v8_0_ip_block); | ||
1298 | amdgpu_ip_block_add(adev, &sdma_v3_0_ip_block); | ||
1299 | amdgpu_ip_block_add(adev, &uvd_v6_0_ip_block); | ||
1300 | amdgpu_ip_block_add(adev, &vce_v3_0_ip_block); | ||
1301 | break; | ||
1302 | case CHIP_TONGA: | ||
1303 | amdgpu_ip_block_add(adev, &vi_common_ip_block); | ||
1304 | amdgpu_ip_block_add(adev, &gmc_v8_0_ip_block); | ||
1305 | amdgpu_ip_block_add(adev, &tonga_ih_ip_block); | ||
1306 | amdgpu_ip_block_add(adev, &amdgpu_pp_ip_block); | ||
1307 | if (adev->enable_virtual_display) | ||
1308 | amdgpu_ip_block_add(adev, &dce_virtual_ip_block); | ||
1309 | else | ||
1310 | amdgpu_ip_block_add(adev, &dce_v10_0_ip_block); | ||
1311 | amdgpu_ip_block_add(adev, &gfx_v8_0_ip_block); | ||
1312 | amdgpu_ip_block_add(adev, &sdma_v3_0_ip_block); | ||
1313 | amdgpu_ip_block_add(adev, &uvd_v5_0_ip_block); | ||
1314 | amdgpu_ip_block_add(adev, &vce_v3_0_ip_block); | ||
1315 | break; | ||
1316 | case CHIP_POLARIS11: | ||
1317 | case CHIP_POLARIS10: | ||
1318 | amdgpu_ip_block_add(adev, &vi_common_ip_block); | ||
1319 | amdgpu_ip_block_add(adev, &gmc_v8_1_ip_block); | ||
1320 | amdgpu_ip_block_add(adev, &tonga_ih_ip_block); | ||
1321 | amdgpu_ip_block_add(adev, &amdgpu_pp_ip_block); | ||
1322 | if (adev->enable_virtual_display) | ||
1323 | amdgpu_ip_block_add(adev, &dce_virtual_ip_block); | ||
1324 | else | ||
1325 | amdgpu_ip_block_add(adev, &dce_v11_2_ip_block); | ||
1326 | amdgpu_ip_block_add(adev, &gfx_v8_0_ip_block); | ||
1327 | amdgpu_ip_block_add(adev, &sdma_v3_1_ip_block); | ||
1328 | amdgpu_ip_block_add(adev, &uvd_v6_3_ip_block); | ||
1329 | amdgpu_ip_block_add(adev, &vce_v3_4_ip_block); | ||
1330 | break; | ||
1331 | case CHIP_CARRIZO: | ||
1332 | amdgpu_ip_block_add(adev, &vi_common_ip_block); | ||
1333 | amdgpu_ip_block_add(adev, &gmc_v8_0_ip_block); | ||
1334 | amdgpu_ip_block_add(adev, &cz_ih_ip_block); | ||
1335 | amdgpu_ip_block_add(adev, &amdgpu_pp_ip_block); | ||
1336 | if (adev->enable_virtual_display) | ||
1337 | amdgpu_ip_block_add(adev, &dce_virtual_ip_block); | ||
1338 | else | ||
1339 | amdgpu_ip_block_add(adev, &dce_v11_0_ip_block); | ||
1340 | amdgpu_ip_block_add(adev, &gfx_v8_0_ip_block); | ||
1341 | amdgpu_ip_block_add(adev, &sdma_v3_0_ip_block); | ||
1342 | amdgpu_ip_block_add(adev, &uvd_v6_0_ip_block); | ||
1343 | amdgpu_ip_block_add(adev, &vce_v3_1_ip_block); | ||
1344 | #if defined(CONFIG_DRM_AMD_ACP) | ||
1345 | amdgpu_ip_block_add(adev, &acp_ip_block); | ||
1346 | #endif | ||
1347 | break; | ||
1348 | case CHIP_STONEY: | ||
1349 | amdgpu_ip_block_add(adev, &vi_common_ip_block); | ||
1350 | amdgpu_ip_block_add(adev, &gmc_v8_0_ip_block); | ||
1351 | amdgpu_ip_block_add(adev, &cz_ih_ip_block); | ||
1352 | amdgpu_ip_block_add(adev, &amdgpu_pp_ip_block); | ||
1353 | if (adev->enable_virtual_display) | ||
1354 | amdgpu_ip_block_add(adev, &dce_virtual_ip_block); | ||
1355 | else | ||
1356 | amdgpu_ip_block_add(adev, &dce_v11_0_ip_block); | ||
1357 | amdgpu_ip_block_add(adev, &gfx_v8_1_ip_block); | ||
1358 | amdgpu_ip_block_add(adev, &sdma_v3_0_ip_block); | ||
1359 | amdgpu_ip_block_add(adev, &uvd_v6_2_ip_block); | ||
1360 | amdgpu_ip_block_add(adev, &vce_v3_4_ip_block); | ||
1361 | #if defined(CONFIG_DRM_AMD_ACP) | ||
1362 | amdgpu_ip_block_add(adev, &acp_ip_block); | ||
1363 | #endif | ||
1364 | break; | ||
1365 | default: | ||
1366 | /* FIXME: not supported yet */ | ||
1367 | return -EINVAL; | ||
1368 | } | ||
1369 | |||
1370 | return 0; | ||
1371 | } | ||
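
The rework above replaces ten static per-ASIC tables (a normal and a _vd variant each) with incremental amdgpu_ip_block_add() calls: block order stays explicit in code, but virtual display shrinks from a fully duplicated table to a one-line conditional per ASIC, which is where most of this patch's net deletions come from. The accumulator itself is not part of this hunk; a sketch of what it presumably does, assuming adev->ip_blocks becomes a writable array whose entries reference a version descriptor (the field layout is an assumption, the real helper is added elsewhere in the series):

        int amdgpu_ip_block_add(struct amdgpu_device *adev,
                                const struct amdgpu_ip_block_version *ip_block_version)
        {
                if (!ip_block_version)
                        return -EINVAL;

                adev->ip_blocks[adev->num_ip_blocks++].version = ip_block_version;

                return 0;
        }
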
diff --git a/drivers/gpu/drm/amd/amdgpu/vi.h b/drivers/gpu/drm/amd/amdgpu/vi.h index 502094042462..575d7aed5d32 100644 --- a/drivers/gpu/drm/amd/amdgpu/vi.h +++ b/drivers/gpu/drm/amd/amdgpu/vi.h | |||
@@ -24,8 +24,6 @@ | |||
24 | #ifndef __VI_H__ | 24 | #ifndef __VI_H__ |
25 | #define __VI_H__ | 25 | #define __VI_H__ |
26 | 26 | ||
27 | extern const struct amd_ip_funcs vi_common_ip_funcs; | ||
28 | |||
29 | void vi_srbm_select(struct amdgpu_device *adev, | 27 | void vi_srbm_select(struct amdgpu_device *adev, |
30 | u32 me, u32 pipe, u32 queue, u32 vmid); | 28 | u32 me, u32 pipe, u32 queue, u32 vmid); |
31 | int vi_set_ip_blocks(struct amdgpu_device *adev); | 29 | int vi_set_ip_blocks(struct amdgpu_device *adev); |
diff --git a/drivers/gpu/drm/amd/include/amd_shared.h b/drivers/gpu/drm/amd/include/amd_shared.h index bec8125bceb0..d1986276dbbd 100644 --- a/drivers/gpu/drm/amd/include/amd_shared.h +++ b/drivers/gpu/drm/amd/include/amd_shared.h | |||
@@ -84,6 +84,29 @@ enum amd_powergating_state { | |||
84 | AMD_PG_STATE_UNGATE, | 84 | AMD_PG_STATE_UNGATE, |
85 | }; | 85 | }; |
86 | 86 | ||
87 | struct amd_vce_state { | ||
88 | /* vce clocks */ | ||
89 | u32 evclk; | ||
90 | u32 ecclk; | ||
91 | /* gpu clocks */ | ||
92 | u32 sclk; | ||
93 | u32 mclk; | ||
94 | u8 clk_idx; | ||
95 | u8 pstate; | ||
96 | }; | ||
97 | |||
98 | |||
99 | #define AMD_MAX_VCE_LEVELS 6 | ||
100 | |||
101 | enum amd_vce_level { | ||
102 | AMD_VCE_LEVEL_AC_ALL = 0, /* AC, All cases */ | ||
103 | AMD_VCE_LEVEL_DC_EE = 1, /* DC, entropy encoding */ | ||
104 | AMD_VCE_LEVEL_DC_LL_LOW = 2, /* DC, low latency queue, res <= 720 */ | ||
105 | AMD_VCE_LEVEL_DC_LL_HIGH = 3, /* DC, low latency queue, 1080 >= res > 720 */ | ||
106 | AMD_VCE_LEVEL_DC_GP_LOW = 4, /* DC, general purpose queue, res <= 720 */ | ||
107 | AMD_VCE_LEVEL_DC_GP_HIGH = 5, /* DC, general purpose queue, 1080 >= res > 720 */ | ||
108 | }; | ||
109 | |||
87 | /* CG flags */ | 110 | /* CG flags */ |
88 | #define AMD_CG_SUPPORT_GFX_MGCG (1 << 0) | 111 | #define AMD_CG_SUPPORT_GFX_MGCG (1 << 0) |
89 | #define AMD_CG_SUPPORT_GFX_MGLS (1 << 1) | 112 | #define AMD_CG_SUPPORT_GFX_MGLS (1 << 1) |
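
struct amd_vce_state and the AMD_VCE_LEVEL_* enum move into the shared amd_shared.h so that the powerplay hwmgr (whose pp_vce_state/PP_MAX_VCE_LEVELS uses are converted in the hunks below) and the amdgpu dpm code can exchange VCE states without duplicate definitions. An illustrative consumer, bounds-checked the same way pp_dpm_get_vce_clock_state() is below (the table and count names mirror hwmgr's fields; the helper itself is hypothetical):

        struct amd_vce_state vce_states[AMD_MAX_VCE_LEVELS];
        unsigned int num_vce_state_tables;

        static const struct amd_vce_state *vce_state_at(unsigned int idx)
        {
                return idx < num_vce_state_tables ? &vce_states[idx] : NULL;
        }
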
diff --git a/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_1_d.h b/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_1_d.h index 3014d4a58c43..a9ef1562f43b 100644 --- a/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_1_d.h +++ b/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_1_d.h | |||
@@ -176,6 +176,8 @@ | |||
176 | #define mmSMU1_SMU_SMC_IND_DATA 0x83 | 176 | #define mmSMU1_SMU_SMC_IND_DATA 0x83 |
177 | #define mmSMU2_SMU_SMC_IND_DATA 0x85 | 177 | #define mmSMU2_SMU_SMC_IND_DATA 0x85 |
178 | #define mmSMU3_SMU_SMC_IND_DATA 0x87 | 178 | #define mmSMU3_SMU_SMC_IND_DATA 0x87 |
179 | #define mmSMC_IND_INDEX_11 0x1AC | ||
180 | #define mmSMC_IND_DATA_11 0x1AD | ||
179 | #define ixRCU_UC_EVENTS 0xc0000004 | 181 | #define ixRCU_UC_EVENTS 0xc0000004 |
180 | #define ixRCU_MISC_CTRL 0xc0000010 | 182 | #define ixRCU_MISC_CTRL 0xc0000010 |
181 | #define ixCC_RCU_FUSES 0xc00c0000 | 183 | #define ixCC_RCU_FUSES 0xc00c0000 |
diff --git a/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_2_d.h b/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_2_d.h index 933917479985..22dd4c2b7290 100644 --- a/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_2_d.h +++ b/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_2_d.h | |||
@@ -87,6 +87,8 @@ | |||
87 | #define mmSMC_IND_DATA_6 0x8d | 87 | #define mmSMC_IND_DATA_6 0x8d |
88 | #define mmSMC_IND_INDEX_7 0x8e | 88 | #define mmSMC_IND_INDEX_7 0x8e |
89 | #define mmSMC_IND_DATA_7 0x8f | 89 | #define mmSMC_IND_DATA_7 0x8f |
90 | #define mmSMC_IND_INDEX_11 0x1AC | ||
91 | #define mmSMC_IND_DATA_11 0x1AD | ||
90 | #define mmSMC_IND_ACCESS_CNTL 0x92 | 92 | #define mmSMC_IND_ACCESS_CNTL 0x92 |
91 | #define mmSMC_MESSAGE_0 0x94 | 93 | #define mmSMC_MESSAGE_0 0x94 |
92 | #define mmSMC_RESP_0 0x95 | 94 | #define mmSMC_RESP_0 0x95 |
diff --git a/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_3_d.h b/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_3_d.h index 44b1855cb8df..eca2b851f25f 100644 --- a/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_3_d.h +++ b/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_3_d.h | |||
@@ -90,6 +90,8 @@ | |||
90 | #define mmSMC_IND_DATA_6 0x8d | 90 | #define mmSMC_IND_DATA_6 0x8d |
91 | #define mmSMC_IND_INDEX_7 0x8e | 91 | #define mmSMC_IND_INDEX_7 0x8e |
92 | #define mmSMC_IND_DATA_7 0x8f | 92 | #define mmSMC_IND_DATA_7 0x8f |
93 | #define mmSMC_IND_INDEX_11 0x1AC | ||
94 | #define mmSMC_IND_DATA_11 0x1AD | ||
93 | #define mmSMC_IND_ACCESS_CNTL 0x92 | 95 | #define mmSMC_IND_ACCESS_CNTL 0x92 |
94 | #define mmSMC_MESSAGE_0 0x94 | 96 | #define mmSMC_MESSAGE_0 0x94 |
95 | #define mmSMC_RESP_0 0x95 | 97 | #define mmSMC_RESP_0 0x95 |
diff --git a/drivers/gpu/drm/amd/include/cgs_common.h b/drivers/gpu/drm/amd/include/cgs_common.h index df7c18b6a02a..e4a1697ec1d3 100755 --- a/drivers/gpu/drm/amd/include/cgs_common.h +++ b/drivers/gpu/drm/amd/include/cgs_common.h | |||
@@ -106,6 +106,7 @@ enum cgs_ucode_id { | |||
106 | CGS_UCODE_ID_CP_MEC_JT2, | 106 | CGS_UCODE_ID_CP_MEC_JT2, |
107 | CGS_UCODE_ID_GMCON_RENG, | 107 | CGS_UCODE_ID_GMCON_RENG, |
108 | CGS_UCODE_ID_RLC_G, | 108 | CGS_UCODE_ID_RLC_G, |
109 | CGS_UCODE_ID_STORAGE, | ||
109 | CGS_UCODE_ID_MAXIMUM, | 110 | CGS_UCODE_ID_MAXIMUM, |
110 | }; | 111 | }; |
111 | 112 | ||
@@ -619,6 +620,8 @@ typedef int (*cgs_call_acpi_method)(struct cgs_device *cgs_device, | |||
619 | typedef int (*cgs_query_system_info)(struct cgs_device *cgs_device, | 620 | typedef int (*cgs_query_system_info)(struct cgs_device *cgs_device, |
620 | struct cgs_system_info *sys_info); | 621 | struct cgs_system_info *sys_info); |
621 | 622 | ||
623 | typedef int (*cgs_is_virtualization_enabled_t)(void *cgs_device); | ||
624 | |||
622 | struct cgs_ops { | 625 | struct cgs_ops { |
623 | /* memory management calls (similar to KFD interface) */ | 626 | /* memory management calls (similar to KFD interface) */ |
624 | cgs_gpu_mem_info_t gpu_mem_info; | 627 | cgs_gpu_mem_info_t gpu_mem_info; |
@@ -670,6 +673,7 @@ struct cgs_ops { | |||
670 | cgs_call_acpi_method call_acpi_method; | 673 | cgs_call_acpi_method call_acpi_method; |
671 | /* get system info */ | 674 | /* get system info */ |
672 | cgs_query_system_info query_system_info; | 675 | cgs_query_system_info query_system_info; |
676 | cgs_is_virtualization_enabled_t is_virtualization_enabled; | ||
673 | }; | 677 | }; |
674 | 678 | ||
675 | struct cgs_os_ops; /* To be define in OS-specific CGS header */ | 679 | struct cgs_os_ops; /* To be define in OS-specific CGS header */ |
@@ -773,4 +777,6 @@ struct cgs_device | |||
773 | CGS_CALL(get_pci_resource, cgs_device, resource_type, size, offset, \ | 777 | CGS_CALL(get_pci_resource, cgs_device, resource_type, size, offset, \ |
774 | resource_base) | 778 | resource_base) |
775 | 779 | ||
780 | #define cgs_is_virtualization_enabled(cgs_device) \ | ||
781 | CGS_CALL(is_virtualization_enabled, cgs_device) | ||
776 | #endif /* _CGS_COMMON_H */ | 782 | #endif /* _CGS_COMMON_H */ |
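
Alongside the CGS_UCODE_ID_STORAGE firmware id, cgs_common.h gains an is_virtualization_enabled op; like every other CGS service it is reached through the CGS_CALL() dispatch macro, so clients query it without touching cgs_ops directly. A hedged usage sketch (the caller is invented for illustration; the amdgpu-side implementation of the op is registered elsewhere in this series):

        static bool skip_bare_metal_only_setup(struct pp_hwmgr *hwmgr)
        {
                /* dispatched through CGS_CALL() to the device's cgs_ops */
                return cgs_is_virtualization_enabled(hwmgr->device);
        }
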
diff --git a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c index 7174f7a68266..0b1f2205c2f1 100644 --- a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c +++ b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c | |||
@@ -436,7 +436,8 @@ static enum PP_StateUILabel power_state_convert(enum amd_pm_state_type state) | |||
436 | } | 436 | } |
437 | } | 437 | } |
438 | 438 | ||
439 | int pp_dpm_dispatch_tasks(void *handle, enum amd_pp_event event_id, void *input, void *output) | 439 | static int pp_dpm_dispatch_tasks(void *handle, enum amd_pp_event event_id, |
440 | void *input, void *output) | ||
440 | { | 441 | { |
441 | int ret = 0; | 442 | int ret = 0; |
442 | struct pp_instance *pp_handle; | 443 | struct pp_instance *pp_handle; |
@@ -475,7 +476,7 @@ int pp_dpm_dispatch_tasks(void *handle, enum amd_pp_event event_id, void *input, | |||
475 | return ret; | 476 | return ret; |
476 | } | 477 | } |
477 | 478 | ||
478 | enum amd_pm_state_type pp_dpm_get_current_power_state(void *handle) | 479 | static enum amd_pm_state_type pp_dpm_get_current_power_state(void *handle) |
479 | { | 480 | { |
480 | struct pp_hwmgr *hwmgr; | 481 | struct pp_hwmgr *hwmgr; |
481 | struct pp_power_state *state; | 482 | struct pp_power_state *state; |
@@ -820,6 +821,21 @@ static int pp_dpm_read_sensor(void *handle, int idx, int32_t *value) | |||
820 | return hwmgr->hwmgr_func->read_sensor(hwmgr, idx, value); | 821 | return hwmgr->hwmgr_func->read_sensor(hwmgr, idx, value); |
821 | } | 822 | } |
822 | 823 | ||
824 | static struct amd_vce_state* | ||
825 | pp_dpm_get_vce_clock_state(void *handle, unsigned idx) | ||
826 | { | ||
827 | struct pp_hwmgr *hwmgr; | ||
828 | |||
829 | if (handle) { | ||
830 | hwmgr = ((struct pp_instance *)handle)->hwmgr; | ||
831 | |||
832 | if (hwmgr && idx < hwmgr->num_vce_state_tables) | ||
833 | return &hwmgr->vce_states[idx]; | ||
834 | } | ||
835 | |||
836 | return NULL; | ||
837 | } | ||
838 | |||
823 | const struct amd_powerplay_funcs pp_dpm_funcs = { | 839 | const struct amd_powerplay_funcs pp_dpm_funcs = { |
824 | .get_temperature = pp_dpm_get_temperature, | 840 | .get_temperature = pp_dpm_get_temperature, |
825 | .load_firmware = pp_dpm_load_fw, | 841 | .load_firmware = pp_dpm_load_fw, |
@@ -846,6 +862,7 @@ const struct amd_powerplay_funcs pp_dpm_funcs = { | |||
846 | .get_mclk_od = pp_dpm_get_mclk_od, | 862 | .get_mclk_od = pp_dpm_get_mclk_od, |
847 | .set_mclk_od = pp_dpm_set_mclk_od, | 863 | .set_mclk_od = pp_dpm_set_mclk_od, |
848 | .read_sensor = pp_dpm_read_sensor, | 864 | .read_sensor = pp_dpm_read_sensor, |
865 | .get_vce_clock_state = pp_dpm_get_vce_clock_state, | ||
849 | }; | 866 | }; |
850 | 867 | ||
851 | static int amd_pp_instance_init(struct amd_pp_init *pp_init, | 868 | static int amd_pp_instance_init(struct amd_pp_init *pp_init, |
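
pp_dpm_get_vce_clock_state() validates both the handle and the index against hwmgr->num_vce_state_tables before handing out a pointer into hwmgr->vce_states, and is exported only through the funcs table. A hypothetical caller (the function, its logging, and the handle plumbing are illustrative):

        static void dump_vce_clock_states(void *pp_handle)
        {
                unsigned int i;

                for (i = 0; i < AMD_MAX_VCE_LEVELS; i++) {
                        struct amd_vce_state *s =
                                pp_dpm_funcs.get_vce_clock_state(pp_handle, i);

                        if (!s) /* NULL once i reaches num_vce_state_tables */
                                break;
                        pr_info("vce state %u: evclk %u ecclk %u sclk %u mclk %u\n",
                                i, s->evclk, s->ecclk, s->sclk, s->mclk);
                }
        }
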
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c index 960424913496..4b14f259a147 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c | |||
@@ -66,7 +66,7 @@ static const struct cz_power_state *cast_const_PhwCzPowerState( | |||
66 | return (struct cz_power_state *)hw_ps; | 66 | return (struct cz_power_state *)hw_ps; |
67 | } | 67 | } |
68 | 68 | ||
69 | uint32_t cz_get_eclk_level(struct pp_hwmgr *hwmgr, | 69 | static uint32_t cz_get_eclk_level(struct pp_hwmgr *hwmgr, |
70 | uint32_t clock, uint32_t msg) | 70 | uint32_t clock, uint32_t msg) |
71 | { | 71 | { |
72 | int i = 0; | 72 | int i = 0; |
@@ -1017,7 +1017,7 @@ static int cz_tf_program_bootup_state(struct pp_hwmgr *hwmgr, void *input, | |||
1017 | return 0; | 1017 | return 0; |
1018 | } | 1018 | } |
1019 | 1019 | ||
1020 | int cz_tf_reset_acp_boot_level(struct pp_hwmgr *hwmgr, void *input, | 1020 | static int cz_tf_reset_acp_boot_level(struct pp_hwmgr *hwmgr, void *input, |
1021 | void *output, void *storage, int result) | 1021 | void *output, void *storage, int result) |
1022 | { | 1022 | { |
1023 | struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend); | 1023 | struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend); |
@@ -1225,7 +1225,7 @@ static int cz_hwmgr_backend_fini(struct pp_hwmgr *hwmgr) | |||
1225 | return 0; | 1225 | return 0; |
1226 | } | 1226 | } |
1227 | 1227 | ||
1228 | int cz_phm_force_dpm_highest(struct pp_hwmgr *hwmgr) | 1228 | static int cz_phm_force_dpm_highest(struct pp_hwmgr *hwmgr) |
1229 | { | 1229 | { |
1230 | struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend); | 1230 | struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend); |
1231 | 1231 | ||
@@ -1239,7 +1239,7 @@ int cz_phm_force_dpm_highest(struct pp_hwmgr *hwmgr) | |||
1239 | return 0; | 1239 | return 0; |
1240 | } | 1240 | } |
1241 | 1241 | ||
1242 | int cz_phm_unforce_dpm_levels(struct pp_hwmgr *hwmgr) | 1242 | static int cz_phm_unforce_dpm_levels(struct pp_hwmgr *hwmgr) |
1243 | { | 1243 | { |
1244 | struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend); | 1244 | struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend); |
1245 | struct phm_clock_voltage_dependency_table *table = | 1245 | struct phm_clock_voltage_dependency_table *table = |
@@ -1277,7 +1277,7 @@ int cz_phm_unforce_dpm_levels(struct pp_hwmgr *hwmgr) | |||
1277 | return 0; | 1277 | return 0; |
1278 | } | 1278 | } |
1279 | 1279 | ||
1280 | int cz_phm_force_dpm_lowest(struct pp_hwmgr *hwmgr) | 1280 | static int cz_phm_force_dpm_lowest(struct pp_hwmgr *hwmgr) |
1281 | { | 1281 | { |
1282 | struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend); | 1282 | struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend); |
1283 | 1283 | ||
@@ -1533,7 +1533,7 @@ static int cz_dpm_get_pp_table_entry(struct pp_hwmgr *hwmgr, | |||
1533 | return result; | 1533 | return result; |
1534 | } | 1534 | } |
1535 | 1535 | ||
1536 | int cz_get_power_state_size(struct pp_hwmgr *hwmgr) | 1536 | static int cz_get_power_state_size(struct pp_hwmgr *hwmgr) |
1537 | { | 1537 | { |
1538 | return sizeof(struct cz_power_state); | 1538 | return sizeof(struct cz_power_state); |
1539 | } | 1539 | } |
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/pp_acpi.c b/drivers/gpu/drm/amd/powerplay/hwmgr/pp_acpi.c index 1944d289f846..f5e8fda964f7 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/pp_acpi.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/pp_acpi.c | |||
@@ -25,6 +25,7 @@ | |||
25 | #include "linux/delay.h" | 25 | #include "linux/delay.h" |
26 | #include "hwmgr.h" | 26 | #include "hwmgr.h" |
27 | #include "amd_acpi.h" | 27 | #include "amd_acpi.h" |
28 | #include "pp_acpi.h" | ||
28 | 29 | ||
29 | bool acpi_atcs_functions_supported(void *device, uint32_t index) | 30 | bool acpi_atcs_functions_supported(void *device, uint32_t index) |
30 | { | 31 | { |
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.c b/drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.c index 7de701d8a450..baf0f3d4c2f0 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.c | |||
@@ -131,7 +131,7 @@ static int set_platform_caps(struct pp_hwmgr *hwmgr, uint32_t powerplay_caps) | |||
131 | /** | 131 | /** |
132 | * Private Function to get the PowerPlay Table Address. | 132 | * Private Function to get the PowerPlay Table Address. |
133 | */ | 133 | */ |
134 | const void *get_powerplay_table(struct pp_hwmgr *hwmgr) | 134 | static const void *get_powerplay_table(struct pp_hwmgr *hwmgr) |
135 | { | 135 | { |
136 | int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo); | 136 | int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo); |
137 | 137 | ||
@@ -1049,7 +1049,7 @@ static int check_powerplay_tables( | |||
1049 | return 0; | 1049 | return 0; |
1050 | } | 1050 | } |
1051 | 1051 | ||
1052 | int pp_tables_v1_0_initialize(struct pp_hwmgr *hwmgr) | 1052 | static int pp_tables_v1_0_initialize(struct pp_hwmgr *hwmgr) |
1053 | { | 1053 | { |
1054 | int result = 0; | 1054 | int result = 0; |
1055 | const ATOM_Tonga_POWERPLAYTABLE *powerplay_table; | 1055 | const ATOM_Tonga_POWERPLAYTABLE *powerplay_table; |
@@ -1100,7 +1100,7 @@ int pp_tables_v1_0_initialize(struct pp_hwmgr *hwmgr) | |||
1100 | return result; | 1100 | return result; |
1101 | } | 1101 | } |
1102 | 1102 | ||
1103 | int pp_tables_v1_0_uninitialize(struct pp_hwmgr *hwmgr) | 1103 | static int pp_tables_v1_0_uninitialize(struct pp_hwmgr *hwmgr) |
1104 | { | 1104 | { |
1105 | struct phm_ppt_v1_information *pp_table_information = | 1105 | struct phm_ppt_v1_information *pp_table_information = |
1106 | (struct phm_ppt_v1_information *)(hwmgr->pptable); | 1106 | (struct phm_ppt_v1_information *)(hwmgr->pptable); |
@@ -1211,7 +1211,7 @@ static int ppt_get_num_of_vce_state_table_entries_v1_0(struct pp_hwmgr *hwmgr) | |||
1211 | } | 1211 | } |
1212 | 1212 | ||
1213 | static int ppt_get_vce_state_table_entry_v1_0(struct pp_hwmgr *hwmgr, uint32_t i, | 1213 | static int ppt_get_vce_state_table_entry_v1_0(struct pp_hwmgr *hwmgr, uint32_t i, |
1214 | struct pp_vce_state *vce_state, void **clock_info, uint32_t *flag) | 1214 | struct amd_vce_state *vce_state, void **clock_info, uint32_t *flag) |
1215 | { | 1215 | { |
1216 | const ATOM_Tonga_VCE_State_Record *vce_state_record; | 1216 | const ATOM_Tonga_VCE_State_Record *vce_state_record; |
1217 | ATOM_Tonga_SCLK_Dependency_Record *sclk_dep_record; | 1217 | ATOM_Tonga_SCLK_Dependency_Record *sclk_dep_record; |
@@ -1315,7 +1315,7 @@ int get_powerplay_table_entry_v1_0(struct pp_hwmgr *hwmgr, | |||
1315 | 1315 | ||
1316 | hwmgr->num_vce_state_tables = i = ppt_get_num_of_vce_state_table_entries_v1_0(hwmgr); | 1316 | hwmgr->num_vce_state_tables = i = ppt_get_num_of_vce_state_table_entries_v1_0(hwmgr); |
1317 | 1317 | ||
1318 | if ((i != 0) && (i <= PP_MAX_VCE_LEVELS)) { | 1318 | if ((i != 0) && (i <= AMD_MAX_VCE_LEVELS)) { |
1319 | for (j = 0; j < i; j++) | 1319 | for (j = 0; j < i; j++) |
1320 | ppt_get_vce_state_table_entry_v1_0(hwmgr, j, &(hwmgr->vce_states[j]), NULL, &flags); | 1320 | ppt_get_vce_state_table_entry_v1_0(hwmgr, j, &(hwmgr->vce_states[j]), NULL, &flags); |
1321 | } | 1321 | } |
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.c b/drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.c index ccf7ebeaf892..a4e9cf429e62 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.c | |||
@@ -1507,7 +1507,7 @@ static int init_phase_shedding_table(struct pp_hwmgr *hwmgr, | |||
1507 | return 0; | 1507 | return 0; |
1508 | } | 1508 | } |
1509 | 1509 | ||
1510 | int get_number_of_vce_state_table_entries( | 1510 | static int get_number_of_vce_state_table_entries( |
1511 | struct pp_hwmgr *hwmgr) | 1511 | struct pp_hwmgr *hwmgr) |
1512 | { | 1512 | { |
1513 | const ATOM_PPLIB_POWERPLAYTABLE *table = | 1513 | const ATOM_PPLIB_POWERPLAYTABLE *table = |
@@ -1521,9 +1521,9 @@ int get_number_of_vce_state_table_entries( | |||
1521 | return 0; | 1521 | return 0; |
1522 | } | 1522 | } |
1523 | 1523 | ||
1524 | int get_vce_state_table_entry(struct pp_hwmgr *hwmgr, | 1524 | static int get_vce_state_table_entry(struct pp_hwmgr *hwmgr, |
1525 | unsigned long i, | 1525 | unsigned long i, |
1526 | struct pp_vce_state *vce_state, | 1526 | struct amd_vce_state *vce_state, |
1527 | void **clock_info, | 1527 | void **clock_info, |
1528 | unsigned long *flag) | 1528 | unsigned long *flag) |
1529 | { | 1529 | { |
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.c index 6eb6db199250..cf2ee93d8475 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.c | |||
@@ -75,7 +75,7 @@ int smu7_powerdown_uvd(struct pp_hwmgr *hwmgr) | |||
75 | return 0; | 75 | return 0; |
76 | } | 76 | } |
77 | 77 | ||
78 | int smu7_powerup_uvd(struct pp_hwmgr *hwmgr) | 78 | static int smu7_powerup_uvd(struct pp_hwmgr *hwmgr) |
79 | { | 79 | { |
80 | if (phm_cf_want_uvd_power_gating(hwmgr)) { | 80 | if (phm_cf_want_uvd_power_gating(hwmgr)) { |
81 | if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, | 81 | if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, |
@@ -91,7 +91,7 @@ int smu7_powerup_uvd(struct pp_hwmgr *hwmgr) | |||
91 | return 0; | 91 | return 0; |
92 | } | 92 | } |
93 | 93 | ||
94 | int smu7_powerdown_vce(struct pp_hwmgr *hwmgr) | 94 | static int smu7_powerdown_vce(struct pp_hwmgr *hwmgr) |
95 | { | 95 | { |
96 | if (phm_cf_want_vce_power_gating(hwmgr)) | 96 | if (phm_cf_want_vce_power_gating(hwmgr)) |
97 | return smum_send_msg_to_smc(hwmgr->smumgr, | 97 | return smum_send_msg_to_smc(hwmgr->smumgr, |
@@ -99,7 +99,7 @@ int smu7_powerdown_vce(struct pp_hwmgr *hwmgr) | |||
99 | return 0; | 99 | return 0; |
100 | } | 100 | } |
101 | 101 | ||
102 | int smu7_powerup_vce(struct pp_hwmgr *hwmgr) | 102 | static int smu7_powerup_vce(struct pp_hwmgr *hwmgr) |
103 | { | 103 | { |
104 | if (phm_cf_want_vce_power_gating(hwmgr)) | 104 | if (phm_cf_want_vce_power_gating(hwmgr)) |
105 | return smum_send_msg_to_smc(hwmgr->smumgr, | 105 | return smum_send_msg_to_smc(hwmgr->smumgr, |
@@ -107,7 +107,7 @@ int smu7_powerup_vce(struct pp_hwmgr *hwmgr) | |||
107 | return 0; | 107 | return 0; |
108 | } | 108 | } |
109 | 109 | ||
110 | int smu7_powerdown_samu(struct pp_hwmgr *hwmgr) | 110 | static int smu7_powerdown_samu(struct pp_hwmgr *hwmgr) |
111 | { | 111 | { |
112 | if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, | 112 | if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, |
113 | PHM_PlatformCaps_SamuPowerGating)) | 113 | PHM_PlatformCaps_SamuPowerGating)) |
@@ -116,7 +116,7 @@ int smu7_powerdown_samu(struct pp_hwmgr *hwmgr) | |||
116 | return 0; | 116 | return 0; |
117 | } | 117 | } |
118 | 118 | ||
119 | int smu7_powerup_samu(struct pp_hwmgr *hwmgr) | 119 | static int smu7_powerup_samu(struct pp_hwmgr *hwmgr) |
120 | { | 120 | { |
121 | if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, | 121 | if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, |
122 | PHM_PlatformCaps_SamuPowerGating)) | 122 | PHM_PlatformCaps_SamuPowerGating)) |
@@ -149,15 +149,21 @@ int smu7_powergate_uvd(struct pp_hwmgr *hwmgr, bool bgate) | |||
149 | if (bgate) { | 149 | if (bgate) { |
150 | cgs_set_clockgating_state(hwmgr->device, | 150 | cgs_set_clockgating_state(hwmgr->device, |
151 | AMD_IP_BLOCK_TYPE_UVD, | 151 | AMD_IP_BLOCK_TYPE_UVD, |
152 | AMD_CG_STATE_GATE); | 152 | AMD_CG_STATE_UNGATE); |
153 | cgs_set_powergating_state(hwmgr->device, | ||
154 | AMD_IP_BLOCK_TYPE_UVD, | ||
155 | AMD_PG_STATE_GATE); | ||
153 | smu7_update_uvd_dpm(hwmgr, true); | 156 | smu7_update_uvd_dpm(hwmgr, true); |
154 | smu7_powerdown_uvd(hwmgr); | 157 | smu7_powerdown_uvd(hwmgr); |
155 | } else { | 158 | } else { |
156 | smu7_powerup_uvd(hwmgr); | 159 | smu7_powerup_uvd(hwmgr); |
157 | smu7_update_uvd_dpm(hwmgr, false); | 160 | cgs_set_powergating_state(hwmgr->device, |
161 | AMD_IP_BLOCK_TYPE_UVD, | ||
162 | AMD_CG_STATE_UNGATE); | ||
158 | cgs_set_clockgating_state(hwmgr->device, | 163 | cgs_set_clockgating_state(hwmgr->device, |
159 | AMD_IP_BLOCK_TYPE_UVD, | 164 | AMD_IP_BLOCK_TYPE_UVD, |
160 | AMD_CG_STATE_UNGATE); | 165 | AMD_CG_STATE_GATE); |
166 | smu7_update_uvd_dpm(hwmgr, false); | ||
161 | } | 167 | } |
162 | 168 | ||
163 | return 0; | 169 | return 0; |
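
The smu7_powergate_uvd() hunk above corrects the gating order: on the way down, clock gating is first ungated (the old code gated it) and power gating is now driven explicitly before the DPM update and power-down; on the way up, power is ungated before clock gating is re-engaged, with the DPM update running last. The resulting gate-path sequence, as an illustrative wrapper (the inference that clocks must be open while the PG state changes is drawn from the ordering, not stated in the patch):

        static void uvd_gate_sequence(struct pp_hwmgr *hwmgr)
        {
                cgs_set_clockgating_state(hwmgr->device, AMD_IP_BLOCK_TYPE_UVD,
                                          AMD_CG_STATE_UNGATE); /* open clocks */
                cgs_set_powergating_state(hwmgr->device, AMD_IP_BLOCK_TYPE_UVD,
                                          AMD_PG_STATE_GATE);   /* gate power  */
                smu7_update_uvd_dpm(hwmgr, true);
                smu7_powerdown_uvd(hwmgr);
        }
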
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c index 609996c84ad5..073e0bfa22a0 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c | |||
@@ -89,7 +89,7 @@ enum DPM_EVENT_SRC { | |||
89 | 89 | ||
90 | static const unsigned long PhwVIslands_Magic = (unsigned long)(PHM_VIslands_Magic); | 90 | static const unsigned long PhwVIslands_Magic = (unsigned long)(PHM_VIslands_Magic); |
91 | 91 | ||
92 | struct smu7_power_state *cast_phw_smu7_power_state( | 92 | static struct smu7_power_state *cast_phw_smu7_power_state( |
93 | struct pp_hw_power_state *hw_ps) | 93 | struct pp_hw_power_state *hw_ps) |
94 | { | 94 | { |
95 | PP_ASSERT_WITH_CODE((PhwVIslands_Magic == hw_ps->magic), | 95 | PP_ASSERT_WITH_CODE((PhwVIslands_Magic == hw_ps->magic), |
@@ -99,7 +99,7 @@ struct smu7_power_state *cast_phw_smu7_power_state( | |||
99 | return (struct smu7_power_state *)hw_ps; | 99 | return (struct smu7_power_state *)hw_ps; |
100 | } | 100 | } |
101 | 101 | ||
102 | const struct smu7_power_state *cast_const_phw_smu7_power_state( | 102 | static const struct smu7_power_state *cast_const_phw_smu7_power_state( |
103 | const struct pp_hw_power_state *hw_ps) | 103 | const struct pp_hw_power_state *hw_ps) |
104 | { | 104 | { |
105 | PP_ASSERT_WITH_CODE((PhwVIslands_Magic == hw_ps->magic), | 105 | PP_ASSERT_WITH_CODE((PhwVIslands_Magic == hw_ps->magic), |
@@ -115,7 +115,7 @@ const struct smu7_power_state *cast_const_phw_smu7_power_state( | |||
115 | * @param hwmgr the address of the powerplay hardware manager. | 115 | * @param hwmgr the address of the powerplay hardware manager. |
116 | * @return always 0 | 116 | * @return always 0 |
117 | */ | 117 | */ |
118 | int smu7_get_mc_microcode_version (struct pp_hwmgr *hwmgr) | 118 | static int smu7_get_mc_microcode_version(struct pp_hwmgr *hwmgr) |
119 | { | 119 | { |
120 | cgs_write_register(hwmgr->device, mmMC_SEQ_IO_DEBUG_INDEX, 0x9F); | 120 | cgs_write_register(hwmgr->device, mmMC_SEQ_IO_DEBUG_INDEX, 0x9F); |
121 | 121 | ||
@@ -124,7 +124,7 @@ int smu7_get_mc_microcode_version (struct pp_hwmgr *hwmgr) | |||
124 | return 0; | 124 | return 0; |
125 | } | 125 | } |
126 | 126 | ||
127 | uint16_t smu7_get_current_pcie_speed(struct pp_hwmgr *hwmgr) | 127 | static uint16_t smu7_get_current_pcie_speed(struct pp_hwmgr *hwmgr) |
128 | { | 128 | { |
129 | uint32_t speedCntl = 0; | 129 | uint32_t speedCntl = 0; |
130 | 130 | ||
@@ -135,7 +135,7 @@ uint16_t smu7_get_current_pcie_speed(struct pp_hwmgr *hwmgr) | |||
135 | PCIE_LC_SPEED_CNTL, LC_CURRENT_DATA_RATE)); | 135 | PCIE_LC_SPEED_CNTL, LC_CURRENT_DATA_RATE)); |
136 | } | 136 | } |
137 | 137 | ||
138 | int smu7_get_current_pcie_lane_number(struct pp_hwmgr *hwmgr) | 138 | static int smu7_get_current_pcie_lane_number(struct pp_hwmgr *hwmgr) |
139 | { | 139 | { |
140 | uint32_t link_width; | 140 | uint32_t link_width; |
141 | 141 | ||
@@ -155,7 +155,7 @@ int smu7_get_current_pcie_lane_number(struct pp_hwmgr *hwmgr) | |||
155 | * @param pHwMgr the address of the powerplay hardware manager. | 155 | * @param pHwMgr the address of the powerplay hardware manager. |
156 | * @return always PP_Result_OK | 156 | * @return always PP_Result_OK |
157 | */ | 157 | */ |
158 | int smu7_enable_smc_voltage_controller(struct pp_hwmgr *hwmgr) | 158 | static int smu7_enable_smc_voltage_controller(struct pp_hwmgr *hwmgr) |
159 | { | 159 | { |
160 | if (hwmgr->feature_mask & PP_SMC_VOLTAGE_CONTROL_MASK) | 160 | if (hwmgr->feature_mask & PP_SMC_VOLTAGE_CONTROL_MASK) |
161 | smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_Voltage_Cntl_Enable); | 161 | smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_Voltage_Cntl_Enable); |
@@ -802,7 +802,7 @@ static int smu7_setup_dpm_tables_v1(struct pp_hwmgr *hwmgr) | |||
802 | return 0; | 802 | return 0; |
803 | } | 803 | } |
804 | 804 | ||
805 | int smu7_setup_default_dpm_tables(struct pp_hwmgr *hwmgr) | 805 | static int smu7_setup_default_dpm_tables(struct pp_hwmgr *hwmgr) |
806 | { | 806 | { |
807 | struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); | 807 | struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); |
808 | 808 | ||
@@ -1153,7 +1153,7 @@ static int smu7_disable_thermal_auto_throttle(struct pp_hwmgr *hwmgr) | |||
1153 | return smu7_disable_auto_throttle_source(hwmgr, PHM_AutoThrottleSource_Thermal); | 1153 | return smu7_disable_auto_throttle_source(hwmgr, PHM_AutoThrottleSource_Thermal); |
1154 | } | 1154 | } |
1155 | 1155 | ||
1156 | int smu7_pcie_performance_request(struct pp_hwmgr *hwmgr) | 1156 | static int smu7_pcie_performance_request(struct pp_hwmgr *hwmgr) |
1157 | { | 1157 | { |
1158 | struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); | 1158 | struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); |
1159 | data->pcie_performance_request = true; | 1159 | data->pcie_performance_request = true; |
@@ -1161,7 +1161,7 @@ int smu7_pcie_performance_request(struct pp_hwmgr *hwmgr) | |||
1161 | return 0; | 1161 | return 0; |
1162 | } | 1162 | } |
1163 | 1163 | ||
1164 | int smu7_enable_dpm_tasks(struct pp_hwmgr *hwmgr) | 1164 | static int smu7_enable_dpm_tasks(struct pp_hwmgr *hwmgr) |
1165 | { | 1165 | { |
1166 | int tmp_result = 0; | 1166 | int tmp_result = 0; |
1167 | int result = 0; | 1167 | int result = 0; |
@@ -1352,6 +1352,8 @@ static void smu7_init_dpm_defaults(struct pp_hwmgr *hwmgr) | |||
1352 | struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); | 1352 | struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); |
1353 | struct phm_ppt_v1_information *table_info = | 1353 | struct phm_ppt_v1_information *table_info = |
1354 | (struct phm_ppt_v1_information *)(hwmgr->pptable); | 1354 | (struct phm_ppt_v1_information *)(hwmgr->pptable); |
1355 | struct cgs_system_info sys_info = {0}; | ||
1356 | int result; | ||
1355 | 1357 | ||
1356 | data->dll_default_on = false; | 1358 | data->dll_default_on = false; |
1357 | data->mclk_dpm0_activity_target = 0xa; | 1359 | data->mclk_dpm0_activity_target = 0xa; |
@@ -1439,6 +1441,18 @@ static void smu7_init_dpm_defaults(struct pp_hwmgr *hwmgr) | |||
1439 | data->pcie_lane_performance.min = 16; | 1441 | data->pcie_lane_performance.min = 16; |
1440 | data->pcie_lane_power_saving.max = 0; | 1442 | data->pcie_lane_power_saving.max = 0; |
1441 | data->pcie_lane_power_saving.min = 16; | 1443 | data->pcie_lane_power_saving.min = 16; |
1444 | |||
1445 | sys_info.size = sizeof(struct cgs_system_info); | ||
1446 | sys_info.info_id = CGS_SYSTEM_INFO_PG_FLAGS; | ||
1447 | result = cgs_query_system_info(hwmgr->device, &sys_info); | ||
1448 | if (!result) { | ||
1449 | if (sys_info.value & AMD_PG_SUPPORT_UVD) | ||
1450 | phm_cap_set(hwmgr->platform_descriptor.platformCaps, | ||
1451 | PHM_PlatformCaps_UVDPowerGating); | ||
1452 | if (sys_info.value & AMD_PG_SUPPORT_VCE) | ||
1453 | phm_cap_set(hwmgr->platform_descriptor.platformCaps, | ||
1454 | PHM_PlatformCaps_VCEPowerGating); | ||
1455 | } | ||
1442 | } | 1456 | } |
1443 | 1457 | ||
1444 | /** | 1458 | /** |
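
The block added to smu7_init_dpm_defaults() is the standard CGS query idiom: fill in size and info_id, call cgs_query_system_info(), and trust value only on a zero return. Reduced to a single capability for illustration:

    struct cgs_system_info sys_info = {0};

    sys_info.size = sizeof(struct cgs_system_info);
    sys_info.info_id = CGS_SYSTEM_INFO_PG_FLAGS;
    /* Only set the cap when the query succeeded and the flag is set. */
    if (!cgs_query_system_info(hwmgr->device, &sys_info) &&
        (sys_info.value & AMD_PG_SUPPORT_UVD))
            phm_cap_set(hwmgr->platform_descriptor.platformCaps,
                        PHM_PlatformCaps_UVDPowerGating);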
@@ -1864,7 +1878,7 @@ static int smu7_set_private_data_based_on_pptable_v1(struct pp_hwmgr *hwmgr) | |||
1864 | return 0; | 1878 | return 0; |
1865 | } | 1879 | } |
1866 | 1880 | ||
1867 | int smu7_patch_voltage_workaround(struct pp_hwmgr *hwmgr) | 1881 | static int smu7_patch_voltage_workaround(struct pp_hwmgr *hwmgr) |
1868 | { | 1882 | { |
1869 | struct phm_ppt_v1_information *table_info = | 1883 | struct phm_ppt_v1_information *table_info = |
1870 | (struct phm_ppt_v1_information *)(hwmgr->pptable); | 1884 | (struct phm_ppt_v1_information *)(hwmgr->pptable); |
@@ -2253,7 +2267,7 @@ static int smu7_set_private_data_based_on_pptable_v0(struct pp_hwmgr *hwmgr) | |||
2253 | return 0; | 2267 | return 0; |
2254 | } | 2268 | } |
2255 | 2269 | ||
2256 | int smu7_hwmgr_backend_init(struct pp_hwmgr *hwmgr) | 2270 | static int smu7_hwmgr_backend_init(struct pp_hwmgr *hwmgr) |
2257 | { | 2271 | { |
2258 | struct smu7_hwmgr *data; | 2272 | struct smu7_hwmgr *data; |
2259 | int result; | 2273 | int result; |
@@ -3672,14 +3686,16 @@ static int smu7_set_max_fan_pwm_output(struct pp_hwmgr *hwmgr, uint16_t us_max_f | |||
3672 | PPSMC_MSG_SetFanPwmMax, us_max_fan_pwm); | 3686 | PPSMC_MSG_SetFanPwmMax, us_max_fan_pwm); |
3673 | } | 3687 | } |
3674 | 3688 | ||
3675 | int smu7_notify_smc_display_change(struct pp_hwmgr *hwmgr, bool has_display) | 3689 | static int |
3690 | smu7_notify_smc_display_change(struct pp_hwmgr *hwmgr, bool has_display) | ||
3676 | { | 3691 | { |
3677 | PPSMC_Msg msg = has_display ? (PPSMC_Msg)PPSMC_HasDisplay : (PPSMC_Msg)PPSMC_NoDisplay; | 3692 | PPSMC_Msg msg = has_display ? (PPSMC_Msg)PPSMC_HasDisplay : (PPSMC_Msg)PPSMC_NoDisplay; |
3678 | 3693 | ||
3679 | return (smum_send_msg_to_smc(hwmgr->smumgr, msg) == 0) ? 0 : -1; | 3694 | return (smum_send_msg_to_smc(hwmgr->smumgr, msg) == 0) ? 0 : -1; |
3680 | } | 3695 | } |
3681 | 3696 | ||
3682 | int smu7_notify_smc_display_config_after_ps_adjustment(struct pp_hwmgr *hwmgr) | 3697 | static int |
3698 | smu7_notify_smc_display_config_after_ps_adjustment(struct pp_hwmgr *hwmgr) | ||
3683 | { | 3699 | { |
3684 | uint32_t num_active_displays = 0; | 3700 | uint32_t num_active_displays = 0; |
3685 | struct cgs_display_info info = {0}; | 3701 | struct cgs_display_info info = {0}; |
@@ -3701,7 +3717,7 @@ int smu7_notify_smc_display_config_after_ps_adjustment(struct pp_hwmgr *hwmgr) | |||
3701 | * @param hwmgr the address of the powerplay hardware manager. | 3717 | * @param hwmgr the address of the powerplay hardware manager. |
3702 | * @return always OK | 3718 | * @return always OK |
3703 | */ | 3719 | */ |
3704 | int smu7_program_display_gap(struct pp_hwmgr *hwmgr) | 3720 | static int smu7_program_display_gap(struct pp_hwmgr *hwmgr) |
3705 | { | 3721 | { |
3706 | struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); | 3722 | struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); |
3707 | uint32_t num_active_displays = 0; | 3723 | uint32_t num_active_displays = 0; |
@@ -3751,7 +3767,7 @@ int smu7_program_display_gap(struct pp_hwmgr *hwmgr) | |||
3751 | return 0; | 3767 | return 0; |
3752 | } | 3768 | } |
3753 | 3769 | ||
3754 | int smu7_display_configuration_changed_task(struct pp_hwmgr *hwmgr) | 3770 | static int smu7_display_configuration_changed_task(struct pp_hwmgr *hwmgr) |
3755 | { | 3771 | { |
3756 | return smu7_program_display_gap(hwmgr); | 3772 | return smu7_program_display_gap(hwmgr); |
3757 | } | 3773 | } |
@@ -3775,13 +3791,14 @@ static int smu7_set_max_fan_rpm_output(struct pp_hwmgr *hwmgr, uint16_t us_max_f | |||
3775 | PPSMC_MSG_SetFanRpmMax, us_max_fan_rpm); | 3791 | PPSMC_MSG_SetFanRpmMax, us_max_fan_rpm); |
3776 | } | 3792 | } |
3777 | 3793 | ||
3778 | int smu7_register_internal_thermal_interrupt(struct pp_hwmgr *hwmgr, | 3794 | static int smu7_register_internal_thermal_interrupt(struct pp_hwmgr *hwmgr, |
3779 | const void *thermal_interrupt_info) | 3795 | const void *thermal_interrupt_info) |
3780 | { | 3796 | { |
3781 | return 0; | 3797 | return 0; |
3782 | } | 3798 | } |
3783 | 3799 | ||
3784 | bool smu7_check_smc_update_required_for_display_configuration(struct pp_hwmgr *hwmgr) | 3800 | static bool |
3801 | smu7_check_smc_update_required_for_display_configuration(struct pp_hwmgr *hwmgr) | ||
3785 | { | 3802 | { |
3786 | struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); | 3803 | struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); |
3787 | bool is_update_required = false; | 3804 | bool is_update_required = false; |
@@ -3810,7 +3827,9 @@ static inline bool smu7_are_power_levels_equal(const struct smu7_performance_lev | |||
3810 | (pl1->pcie_lane == pl2->pcie_lane)); | 3827 | (pl1->pcie_lane == pl2->pcie_lane)); |
3811 | } | 3828 | } |
3812 | 3829 | ||
3813 | int smu7_check_states_equal(struct pp_hwmgr *hwmgr, const struct pp_hw_power_state *pstate1, const struct pp_hw_power_state *pstate2, bool *equal) | 3830 | static int smu7_check_states_equal(struct pp_hwmgr *hwmgr, |
3831 | const struct pp_hw_power_state *pstate1, | ||
3832 | const struct pp_hw_power_state *pstate2, bool *equal) | ||
3814 | { | 3833 | { |
3815 | const struct smu7_power_state *psa; | 3834 | const struct smu7_power_state *psa; |
3816 | const struct smu7_power_state *psb; | 3835 | const struct smu7_power_state *psb; |
@@ -3843,7 +3862,7 @@ int smu7_check_states_equal(struct pp_hwmgr *hwmgr, const struct pp_hw_power_sta | |||
3843 | return 0; | 3862 | return 0; |
3844 | } | 3863 | } |
3845 | 3864 | ||
3846 | int smu7_upload_mc_firmware(struct pp_hwmgr *hwmgr) | 3865 | static int smu7_upload_mc_firmware(struct pp_hwmgr *hwmgr) |
3847 | { | 3866 | { |
3848 | struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); | 3867 | struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); |
3849 | 3868 | ||
@@ -3972,7 +3991,7 @@ static int smu7_init_sclk_threshold(struct pp_hwmgr *hwmgr) | |||
3972 | return 0; | 3991 | return 0; |
3973 | } | 3992 | } |
3974 | 3993 | ||
3975 | int smu7_setup_asic_task(struct pp_hwmgr *hwmgr) | 3994 | static int smu7_setup_asic_task(struct pp_hwmgr *hwmgr) |
3976 | { | 3995 | { |
3977 | int tmp_result, result = 0; | 3996 | int tmp_result, result = 0; |
3978 | 3997 | ||
diff --git a/drivers/gpu/drm/amd/powerplay/inc/amd_powerplay.h b/drivers/gpu/drm/amd/powerplay/inc/amd_powerplay.h index 3fb5e57a378b..eb3e83d7af31 100644 --- a/drivers/gpu/drm/amd/powerplay/inc/amd_powerplay.h +++ b/drivers/gpu/drm/amd/powerplay/inc/amd_powerplay.h | |||
@@ -359,6 +359,7 @@ struct amd_powerplay_funcs { | |||
359 | int (*get_mclk_od)(void *handle); | 359 | int (*get_mclk_od)(void *handle); |
360 | int (*set_mclk_od)(void *handle, uint32_t value); | 360 | int (*set_mclk_od)(void *handle, uint32_t value); |
361 | int (*read_sensor)(void *handle, int idx, int32_t *value); | 361 | int (*read_sensor)(void *handle, int idx, int32_t *value); |
362 | struct amd_vce_state* (*get_vce_clock_state)(void *handle, unsigned idx); | ||
362 | }; | 363 | }; |
363 | 364 | ||
364 | struct amd_powerplay { | 365 | struct amd_powerplay { |
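
A minimal sketch of a consumer of the new get_vce_clock_state hook. It assumes the usual pp_handle/pp_funcs pairing in struct amd_powerplay, that out-of-range indices return NULL, and that amd_vce_state carries the same evclk/ecclk fields as the pp_vce_state it replaces; none of these is shown in this diff:

    static void dump_vce_states(struct amd_powerplay *pp)
    {
            struct amd_vce_state *state;
            unsigned int idx = 0;

            /* Walk entries until the hook reports no more states. */
            while ((state = pp->pp_funcs->get_vce_clock_state(pp->pp_handle,
                                                              idx++)))
                    pr_info("VCE state %u: evclk %u, ecclk %u\n",
                            idx - 1, state->evclk, state->ecclk);
    }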
diff --git a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h index 4f0fedd1e9d3..e38b999e3235 100644 --- a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h +++ b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h | |||
@@ -367,7 +367,7 @@ struct pp_table_func { | |||
367 | int (*pptable_get_vce_state_table_entry)( | 367 | int (*pptable_get_vce_state_table_entry)( |
368 | struct pp_hwmgr *hwmgr, | 368 | struct pp_hwmgr *hwmgr, |
369 | unsigned long i, | 369 | unsigned long i, |
370 | struct pp_vce_state *vce_state, | 370 | struct amd_vce_state *vce_state, |
371 | void **clock_info, | 371 | void **clock_info, |
372 | unsigned long *flag); | 372 | unsigned long *flag); |
373 | }; | 373 | }; |
@@ -586,18 +586,6 @@ struct phm_microcode_version_info { | |||
586 | uint32_t NB; | 586 | uint32_t NB; |
587 | }; | 587 | }; |
588 | 588 | ||
589 | #define PP_MAX_VCE_LEVELS 6 | ||
590 | |||
591 | enum PP_VCE_LEVEL { | ||
592 | PP_VCE_LEVEL_AC_ALL = 0, /* AC, All cases */ | ||
593 | PP_VCE_LEVEL_DC_EE = 1, /* DC, entropy encoding */ | ||
594 | PP_VCE_LEVEL_DC_LL_LOW = 2, /* DC, low latency queue, res <= 720 */ | ||
595 | PP_VCE_LEVEL_DC_LL_HIGH = 3, /* DC, low latency queue, 1080 >= res > 720 */ | ||
596 | PP_VCE_LEVEL_DC_GP_LOW = 4, /* DC, general purpose queue, res <= 720 */ | ||
597 | PP_VCE_LEVEL_DC_GP_HIGH = 5, /* DC, general purpose queue, 1080 >= res > 720 */ | ||
598 | }; | ||
599 | |||
600 | |||
601 | enum PP_TABLE_VERSION { | 589 | enum PP_TABLE_VERSION { |
602 | PP_TABLE_V0 = 0, | 590 | PP_TABLE_V0 = 0, |
603 | PP_TABLE_V1, | 591 | PP_TABLE_V1, |
@@ -620,7 +608,7 @@ struct pp_hwmgr { | |||
620 | void *hardcode_pp_table; | 608 | void *hardcode_pp_table; |
621 | bool need_pp_table_upload; | 609 | bool need_pp_table_upload; |
622 | 610 | ||
623 | struct pp_vce_state vce_states[PP_MAX_VCE_LEVELS]; | 611 | struct amd_vce_state vce_states[AMD_MAX_VCE_LEVELS]; |
624 | uint32_t num_vce_state_tables; | 612 | uint32_t num_vce_state_tables; |
625 | 613 | ||
626 | enum amd_dpm_forced_level dpm_level; | 614 | enum amd_dpm_forced_level dpm_level; |
diff --git a/drivers/gpu/drm/amd/powerplay/inc/power_state.h b/drivers/gpu/drm/amd/powerplay/inc/power_state.h index 9ceaed9ac52a..827860fffe78 100644 --- a/drivers/gpu/drm/amd/powerplay/inc/power_state.h +++ b/drivers/gpu/drm/amd/powerplay/inc/power_state.h | |||
@@ -156,15 +156,6 @@ struct pp_power_state { | |||
156 | struct pp_hw_power_state hardware; | 156 | struct pp_hw_power_state hardware; |
157 | }; | 157 | }; |
158 | 158 | ||
159 | |||
160 | /*Structure to hold a VCE state entry*/ | ||
161 | struct pp_vce_state { | ||
162 | uint32_t evclk; | ||
163 | uint32_t ecclk; | ||
164 | uint32_t sclk; | ||
165 | uint32_t mclk; | ||
166 | }; | ||
167 | |||
168 | enum PP_MMProfilingState { | 159 | enum PP_MMProfilingState { |
169 | PP_MMProfilingState_NA = 0, | 160 | PP_MMProfilingState_NA = 0, |
170 | PP_MMProfilingState_Started, | 161 | PP_MMProfilingState_Started, |
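
This removal pairs with the hwmgr.h switch to amd_vce_state and AMD_MAX_VCE_LEVELS above: the powerplay-private struct goes away in favor of a definition shared with the rest of the driver. That shared definition is outside this diff, but presumably mirrors the removed fields:

    /* Assumed shape of the shared replacement (defined in a common
     * header, not in this diff); mirrors the removed pp_vce_state.
     */
    struct amd_vce_state {
            u32 evclk;
            u32 ecclk;
            u32 sclk;
            u32 mclk;
    };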
diff --git a/drivers/gpu/drm/amd/powerplay/inc/pp_acpi.h b/drivers/gpu/drm/amd/powerplay/inc/pp_acpi.h index 3df5de2cdab0..8fe8ba9434ff 100644 --- a/drivers/gpu/drm/amd/powerplay/inc/pp_acpi.h +++ b/drivers/gpu/drm/amd/powerplay/inc/pp_acpi.h | |||
@@ -21,9 +21,6 @@ | |||
21 | * | 21 | * |
22 | */ | 22 | */ |
23 | 23 | ||
24 | extern bool acpi_atcs_functions_supported(void *device, | 24 | bool acpi_atcs_functions_supported(void *device, uint32_t index); |
25 | uint32_t index); | 25 | int acpi_pcie_perf_request(void *device, uint8_t perf_req, bool advertise); |
26 | extern int acpi_pcie_perf_request(void *device, | 26 | bool acpi_atcs_notify_pcie_device_ready(void *device); |
27 | uint8_t perf_req, | ||
28 | bool advertise); | ||
29 | extern bool acpi_atcs_notify_pcie_device_ready(void *device); | ||
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smc.c b/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smc.c index 76310ac7ef0d..34523fe6ed6f 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smc.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smc.c | |||
@@ -2049,7 +2049,7 @@ int fiji_thermal_setup_fan_table(struct pp_hwmgr *hwmgr) | |||
2049 | return 0; | 2049 | return 0; |
2050 | } | 2050 | } |
2051 | 2051 | ||
2052 | int fiji_program_mem_timing_parameters(struct pp_hwmgr *hwmgr) | 2052 | static int fiji_program_mem_timing_parameters(struct pp_hwmgr *hwmgr) |
2053 | { | 2053 | { |
2054 | struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); | 2054 | struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); |
2055 | 2055 | ||
@@ -2125,7 +2125,7 @@ uint32_t fiji_get_offsetof(uint32_t type, uint32_t member) | |||
2125 | return offsetof(SMU73_Discrete_DpmTable, LowSclkInterruptThreshold); | 2125 | return offsetof(SMU73_Discrete_DpmTable, LowSclkInterruptThreshold); |
2126 | } | 2126 | } |
2127 | } | 2127 | } |
2128 | printk("cant't get the offset of type %x member %x \n", type, member); | 2128 | printk(KERN_WARNING "can't get the offset of type %x member %x\n", type, member); |
2129 | return 0; | 2129 | return 0; |
2130 | } | 2130 | } |
2131 | 2131 | ||
@@ -2150,7 +2150,7 @@ uint32_t fiji_get_mac_definition(uint32_t value) | |||
2150 | return SMU73_MAX_LEVELS_MVDD; | 2150 | return SMU73_MAX_LEVELS_MVDD; |
2151 | } | 2151 | } |
2152 | 2152 | ||
2153 | printk("cant't get the mac of %x \n", value); | 2153 | printk(KERN_WARNING "can't get the mac of %x\n", value); |
2154 | return 0; | 2154 | return 0; |
2155 | } | 2155 | } |
2156 | 2156 | ||
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c index 02fe1df855a9..b86e48fb40d1 100755 --- a/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c | |||
@@ -159,7 +159,7 @@ static int fiji_start_smu_in_non_protection_mode(struct pp_smumgr *smumgr) | |||
159 | return result; | 159 | return result; |
160 | } | 160 | } |
161 | 161 | ||
162 | int fiji_setup_pwr_virus(struct pp_smumgr *smumgr) | 162 | static int fiji_setup_pwr_virus(struct pp_smumgr *smumgr) |
163 | { | 163 | { |
164 | int i, result = -1; | 164 | int i, result = -1; |
165 | uint32_t reg, data; | 165 | uint32_t reg, data; |
@@ -224,7 +224,7 @@ static int fiji_start_avfs_btc(struct pp_smumgr *smumgr) | |||
224 | return result; | 224 | return result; |
225 | } | 225 | } |
226 | 226 | ||
227 | int fiji_setup_pm_fuse_for_avfs(struct pp_smumgr *smumgr) | 227 | static int fiji_setup_pm_fuse_for_avfs(struct pp_smumgr *smumgr) |
228 | { | 228 | { |
229 | int result = 0; | 229 | int result = 0; |
230 | uint32_t table_start; | 230 | uint32_t table_start; |
@@ -260,7 +260,7 @@ int fiji_setup_pm_fuse_for_avfs(struct pp_smumgr *smumgr) | |||
260 | return result; | 260 | return result; |
261 | } | 261 | } |
262 | 262 | ||
263 | int fiji_setup_graphics_level_structure(struct pp_smumgr *smumgr) | 263 | static int fiji_setup_graphics_level_structure(struct pp_smumgr *smumgr) |
264 | { | 264 | { |
265 | int32_t vr_config; | 265 | int32_t vr_config; |
266 | uint32_t table_start; | 266 | uint32_t table_start; |
@@ -299,7 +299,7 @@ int fiji_setup_graphics_level_structure(struct pp_smumgr *smumgr) | |||
299 | } | 299 | } |
300 | 300 | ||
301 | /* Work in Progress */ | 301 | /* Work in Progress */ |
302 | int fiji_restore_vft_table(struct pp_smumgr *smumgr) | 302 | static int fiji_restore_vft_table(struct pp_smumgr *smumgr) |
303 | { | 303 | { |
304 | struct fiji_smumgr *priv = (struct fiji_smumgr *)(smumgr->backend); | 304 | struct fiji_smumgr *priv = (struct fiji_smumgr *)(smumgr->backend); |
305 | 305 | ||
@@ -311,7 +311,7 @@ int fiji_restore_vft_table(struct pp_smumgr *smumgr) | |||
311 | } | 311 | } |
312 | 312 | ||
313 | /* Work in Progress */ | 313 | /* Work in Progress */ |
314 | int fiji_save_vft_table(struct pp_smumgr *smumgr) | 314 | static int fiji_save_vft_table(struct pp_smumgr *smumgr) |
315 | { | 315 | { |
316 | struct fiji_smumgr *priv = (struct fiji_smumgr *)(smumgr->backend); | 316 | struct fiji_smumgr *priv = (struct fiji_smumgr *)(smumgr->backend); |
317 | 317 | ||
@@ -322,7 +322,7 @@ int fiji_save_vft_table(struct pp_smumgr *smumgr) | |||
322 | return -EINVAL; | 322 | return -EINVAL; |
323 | } | 323 | } |
324 | 324 | ||
325 | int fiji_avfs_event_mgr(struct pp_smumgr *smumgr, bool smu_started) | 325 | static int fiji_avfs_event_mgr(struct pp_smumgr *smumgr, bool smu_started) |
326 | { | 326 | { |
327 | struct fiji_smumgr *priv = (struct fiji_smumgr *)(smumgr->backend); | 327 | struct fiji_smumgr *priv = (struct fiji_smumgr *)(smumgr->backend); |
328 | 328 | ||
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smc.c b/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smc.c index 8c889caba420..b579f0c175e6 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smc.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smc.c | |||
@@ -2140,7 +2140,7 @@ uint32_t iceland_get_offsetof(uint32_t type, uint32_t member) | |||
2140 | return offsetof(SMU71_Discrete_DpmTable, LowSclkInterruptThreshold); | 2140 | return offsetof(SMU71_Discrete_DpmTable, LowSclkInterruptThreshold); |
2141 | } | 2141 | } |
2142 | } | 2142 | } |
2143 | printk("cant't get the offset of type %x member %x \n", type, member); | 2143 | printk(KERN_WARNING "can't get the offset of type %x member %x\n", type, member); |
2144 | return 0; | 2144 | return 0; |
2145 | } | 2145 | } |
2146 | 2146 | ||
@@ -2163,7 +2163,7 @@ uint32_t iceland_get_mac_definition(uint32_t value) | |||
2163 | return SMU71_MAX_LEVELS_MVDD; | 2163 | return SMU71_MAX_LEVELS_MVDD; |
2164 | } | 2164 | } |
2165 | 2165 | ||
2166 | printk("cant't get the mac of %x \n", value); | 2166 | printk(KERN_WARNING "can't get the mac of %x\n", value); |
2167 | return 0; | 2167 | return 0; |
2168 | } | 2168 | } |
2169 | 2169 | ||
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smc.c b/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smc.c index 4ccc0b72324d..006b22071685 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smc.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smc.c | |||
@@ -2174,7 +2174,7 @@ uint32_t polaris10_get_offsetof(uint32_t type, uint32_t member) | |||
2174 | return offsetof(SMU74_Discrete_DpmTable, LowSclkInterruptThreshold); | 2174 | return offsetof(SMU74_Discrete_DpmTable, LowSclkInterruptThreshold); |
2175 | } | 2175 | } |
2176 | } | 2176 | } |
2177 | printk("cant't get the offset of type %x member %x \n", type, member); | 2177 | printk(KERN_WARNING "can't get the offset of type %x member %x\n", type, member); |
2178 | return 0; | 2178 | return 0; |
2179 | } | 2179 | } |
2180 | 2180 | ||
@@ -2201,7 +2201,7 @@ uint32_t polaris10_get_mac_definition(uint32_t value) | |||
2201 | return SMU7_UVD_MCLK_HANDSHAKE_DISABLE; | 2201 | return SMU7_UVD_MCLK_HANDSHAKE_DISABLE; |
2202 | } | 2202 | } |
2203 | 2203 | ||
2204 | printk("cant't get the mac of %x \n", value); | 2204 | printk(KERN_WARNING "can't get the mac of %x\n", value); |
2205 | return 0; | 2205 | return 0; |
2206 | } | 2206 | } |
2207 | 2207 | ||
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c index 5c3598ab7dae..f38a68747df0 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c | |||
@@ -118,7 +118,7 @@ static int polaris10_perform_btc(struct pp_smumgr *smumgr) | |||
118 | } | 118 | } |
119 | 119 | ||
120 | 120 | ||
121 | int polaris10_setup_graphics_level_structure(struct pp_smumgr *smumgr) | 121 | static int polaris10_setup_graphics_level_structure(struct pp_smumgr *smumgr) |
122 | { | 122 | { |
123 | uint32_t vr_config; | 123 | uint32_t vr_config; |
124 | uint32_t dpm_table_start; | 124 | uint32_t dpm_table_start; |
@@ -172,7 +172,8 @@ int polaris10_setup_graphics_level_structure(struct pp_smumgr *smumgr) | |||
172 | return 0; | 172 | return 0; |
173 | } | 173 | } |
174 | 174 | ||
175 | int polaris10_avfs_event_mgr(struct pp_smumgr *smumgr, bool SMU_VFT_INTACT) | 175 | static int |
176 | polaris10_avfs_event_mgr(struct pp_smumgr *smumgr, bool SMU_VFT_INTACT) | ||
176 | { | 177 | { |
177 | struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(smumgr->backend); | 178 | struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(smumgr->backend); |
178 | 179 | ||
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c index 6af744f42ec9..6df0d6edfdd1 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c | |||
@@ -278,6 +278,9 @@ enum cgs_ucode_id smu7_convert_fw_type_to_cgs(uint32_t fw_type) | |||
278 | case UCODE_ID_RLC_G: | 278 | case UCODE_ID_RLC_G: |
279 | result = CGS_UCODE_ID_RLC_G; | 279 | result = CGS_UCODE_ID_RLC_G; |
280 | break; | 280 | break; |
281 | case UCODE_ID_MEC_STORAGE: | ||
282 | result = CGS_UCODE_ID_STORAGE; | ||
283 | break; | ||
281 | default: | 284 | default: |
282 | break; | 285 | break; |
283 | } | 286 | } |
@@ -452,6 +455,10 @@ int smu7_request_smu_load_fw(struct pp_smumgr *smumgr) | |||
452 | PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(smumgr, | 455 | PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(smumgr, |
453 | UCODE_ID_SDMA1, &toc->entry[toc->num_entries++]), | 456 | UCODE_ID_SDMA1, &toc->entry[toc->num_entries++]), |
454 | "Failed to Get Firmware Entry.", return -EINVAL); | 457 | "Failed to Get Firmware Entry.", return -EINVAL); |
458 | if (cgs_is_virtualization_enabled(smumgr->device)) | ||
459 | PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(smumgr, | ||
460 | UCODE_ID_MEC_STORAGE, &toc->entry[toc->num_entries++]), | ||
461 | "Failed to Get Firmware Entry.", return -EINVAL); | ||
455 | 462 | ||
456 | smu7_send_msg_to_smc_with_parameter(smumgr, PPSMC_MSG_DRV_DRAM_ADDR_HI, smu_data->header_buffer.mc_addr_high); | 463 | smu7_send_msg_to_smc_with_parameter(smumgr, PPSMC_MSG_DRV_DRAM_ADDR_HI, smu_data->header_buffer.mc_addr_high); |
457 | smu7_send_msg_to_smc_with_parameter(smumgr, PPSMC_MSG_DRV_DRAM_ADDR_LO, smu_data->header_buffer.mc_addr_low); | 464 | smu7_send_msg_to_smc_with_parameter(smumgr, PPSMC_MSG_DRV_DRAM_ADDR_LO, smu_data->header_buffer.mc_addr_low); |
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.h b/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.h index 76352f2423ae..919be435b49c 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.h +++ b/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.h | |||
@@ -28,8 +28,6 @@ | |||
28 | #include <pp_endian.h> | 28 | #include <pp_endian.h> |
29 | 29 | ||
30 | #define SMC_RAM_END 0x40000 | 30 | #define SMC_RAM_END 0x40000 |
31 | #define mmSMC_IND_INDEX_11 0x01AC | ||
32 | #define mmSMC_IND_DATA_11 0x01AD | ||
33 | 31 | ||
34 | struct smu7_buffer_entry { | 32 | struct smu7_buffer_entry { |
35 | uint32_t data_size; | 33 | uint32_t data_size; |
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smc.c b/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smc.c index de2a24d85f48..d08f6f19b454 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smc.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smc.c | |||
@@ -2651,7 +2651,7 @@ uint32_t tonga_get_offsetof(uint32_t type, uint32_t member) | |||
2651 | return offsetof(SMU72_Discrete_DpmTable, LowSclkInterruptThreshold); | 2651 | return offsetof(SMU72_Discrete_DpmTable, LowSclkInterruptThreshold); |
2652 | } | 2652 | } |
2653 | } | 2653 | } |
2654 | printk("cant't get the offset of type %x member %x\n", type, member); | 2654 | printk(KERN_WARNING "can't get the offset of type %x member %x\n", type, member); |
2655 | return 0; | 2655 | return 0; |
2656 | } | 2656 | } |
2657 | 2657 | ||
@@ -2675,7 +2675,7 @@ uint32_t tonga_get_mac_definition(uint32_t value) | |||
2675 | case SMU_MAX_LEVELS_MVDD: | 2675 | case SMU_MAX_LEVELS_MVDD: |
2676 | return SMU72_MAX_LEVELS_MVDD; | 2676 | return SMU72_MAX_LEVELS_MVDD; |
2677 | } | 2677 | } |
2678 | printk("cant't get the mac value %x\n", value); | 2678 | printk(KERN_WARNING "can't get the mac value %x\n", value); |
2679 | 2679 | ||
2680 | return 0; | 2680 | return 0; |
2681 | } | 2681 | } |
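
The same two-part fix (the "cant't" typo and an explicit log level) repeats across the fiji, iceland, polaris10 and tonga SMC helpers. For reference, pr_warn() is the usual shorthand for a KERN_WARNING printk and would collapse each of these to:

    pr_warn("can't get the offset of type %x member %x\n", type, member);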
diff --git a/drivers/gpu/drm/ast/ast_ttm.c b/drivers/gpu/drm/ast/ast_ttm.c index 608df4c90520..7134fdf49210 100644 --- a/drivers/gpu/drm/ast/ast_ttm.c +++ b/drivers/gpu/drm/ast/ast_ttm.c | |||
@@ -230,6 +230,7 @@ struct ttm_bo_driver ast_bo_driver = { | |||
230 | .ttm_tt_populate = ast_ttm_tt_populate, | 230 | .ttm_tt_populate = ast_ttm_tt_populate, |
231 | .ttm_tt_unpopulate = ast_ttm_tt_unpopulate, | 231 | .ttm_tt_unpopulate = ast_ttm_tt_unpopulate, |
232 | .init_mem_type = ast_bo_init_mem_type, | 232 | .init_mem_type = ast_bo_init_mem_type, |
233 | .eviction_valuable = ttm_bo_eviction_valuable, | ||
233 | .evict_flags = ast_bo_evict_flags, | 234 | .evict_flags = ast_bo_evict_flags, |
234 | .move = NULL, | 235 | .move = NULL, |
235 | .verify_access = ast_bo_verify_access, | 236 | .verify_access = ast_bo_verify_access, |
diff --git a/drivers/gpu/drm/bochs/bochs_mm.c b/drivers/gpu/drm/bochs/bochs_mm.c index 269cfca9ca06..099a3c688c26 100644 --- a/drivers/gpu/drm/bochs/bochs_mm.c +++ b/drivers/gpu/drm/bochs/bochs_mm.c | |||
@@ -199,6 +199,7 @@ struct ttm_bo_driver bochs_bo_driver = { | |||
199 | .ttm_tt_populate = ttm_pool_populate, | 199 | .ttm_tt_populate = ttm_pool_populate, |
200 | .ttm_tt_unpopulate = ttm_pool_unpopulate, | 200 | .ttm_tt_unpopulate = ttm_pool_unpopulate, |
201 | .init_mem_type = bochs_bo_init_mem_type, | 201 | .init_mem_type = bochs_bo_init_mem_type, |
202 | .eviction_valuable = ttm_bo_eviction_valuable, | ||
202 | .evict_flags = bochs_bo_evict_flags, | 203 | .evict_flags = bochs_bo_evict_flags, |
203 | .move = NULL, | 204 | .move = NULL, |
204 | .verify_access = bochs_bo_verify_access, | 205 | .verify_access = bochs_bo_verify_access, |
diff --git a/drivers/gpu/drm/cirrus/cirrus_ttm.c b/drivers/gpu/drm/cirrus/cirrus_ttm.c index bb2438dd8733..de52b20800e1 100644 --- a/drivers/gpu/drm/cirrus/cirrus_ttm.c +++ b/drivers/gpu/drm/cirrus/cirrus_ttm.c | |||
@@ -230,6 +230,7 @@ struct ttm_bo_driver cirrus_bo_driver = { | |||
230 | .ttm_tt_populate = cirrus_ttm_tt_populate, | 230 | .ttm_tt_populate = cirrus_ttm_tt_populate, |
231 | .ttm_tt_unpopulate = cirrus_ttm_tt_unpopulate, | 231 | .ttm_tt_unpopulate = cirrus_ttm_tt_unpopulate, |
232 | .init_mem_type = cirrus_bo_init_mem_type, | 232 | .init_mem_type = cirrus_bo_init_mem_type, |
233 | .eviction_valuable = ttm_bo_eviction_valuable, | ||
233 | .evict_flags = cirrus_bo_evict_flags, | 234 | .evict_flags = cirrus_bo_evict_flags, |
234 | .move = NULL, | 235 | .move = NULL, |
235 | .verify_access = cirrus_bo_verify_access, | 236 | .verify_access = cirrus_bo_verify_access, |
diff --git a/drivers/gpu/drm/mgag200/mgag200_ttm.c b/drivers/gpu/drm/mgag200/mgag200_ttm.c index 919b35f2ad24..83272b456329 100644 --- a/drivers/gpu/drm/mgag200/mgag200_ttm.c +++ b/drivers/gpu/drm/mgag200/mgag200_ttm.c | |||
@@ -230,6 +230,7 @@ struct ttm_bo_driver mgag200_bo_driver = { | |||
230 | .ttm_tt_populate = mgag200_ttm_tt_populate, | 230 | .ttm_tt_populate = mgag200_ttm_tt_populate, |
231 | .ttm_tt_unpopulate = mgag200_ttm_tt_unpopulate, | 231 | .ttm_tt_unpopulate = mgag200_ttm_tt_unpopulate, |
232 | .init_mem_type = mgag200_bo_init_mem_type, | 232 | .init_mem_type = mgag200_bo_init_mem_type, |
233 | .eviction_valuable = ttm_bo_eviction_valuable, | ||
233 | .evict_flags = mgag200_bo_evict_flags, | 234 | .evict_flags = mgag200_bo_evict_flags, |
234 | .move = NULL, | 235 | .move = NULL, |
235 | .verify_access = mgag200_bo_verify_access, | 236 | .verify_access = mgag200_bo_verify_access, |
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c index 343b8659472c..e395cb6f511f 100644 --- a/drivers/gpu/drm/nouveau/nouveau_bo.c +++ b/drivers/gpu/drm/nouveau/nouveau_bo.c | |||
@@ -1561,6 +1561,7 @@ struct ttm_bo_driver nouveau_bo_driver = { | |||
1561 | .ttm_tt_unpopulate = &nouveau_ttm_tt_unpopulate, | 1561 | .ttm_tt_unpopulate = &nouveau_ttm_tt_unpopulate, |
1562 | .invalidate_caches = nouveau_bo_invalidate_caches, | 1562 | .invalidate_caches = nouveau_bo_invalidate_caches, |
1563 | .init_mem_type = nouveau_bo_init_mem_type, | 1563 | .init_mem_type = nouveau_bo_init_mem_type, |
1564 | .eviction_valuable = ttm_bo_eviction_valuable, | ||
1564 | .evict_flags = nouveau_bo_evict_flags, | 1565 | .evict_flags = nouveau_bo_evict_flags, |
1565 | .move_notify = nouveau_bo_move_ntfy, | 1566 | .move_notify = nouveau_bo_move_ntfy, |
1566 | .move = nouveau_bo_move, | 1567 | .move = nouveau_bo_move, |
diff --git a/drivers/gpu/drm/qxl/qxl_ttm.c b/drivers/gpu/drm/qxl/qxl_ttm.c index e26c82db948b..11761330a6b8 100644 --- a/drivers/gpu/drm/qxl/qxl_ttm.c +++ b/drivers/gpu/drm/qxl/qxl_ttm.c | |||
@@ -387,6 +387,7 @@ static struct ttm_bo_driver qxl_bo_driver = { | |||
387 | .ttm_tt_unpopulate = &qxl_ttm_tt_unpopulate, | 387 | .ttm_tt_unpopulate = &qxl_ttm_tt_unpopulate, |
388 | .invalidate_caches = &qxl_invalidate_caches, | 388 | .invalidate_caches = &qxl_invalidate_caches, |
389 | .init_mem_type = &qxl_init_mem_type, | 389 | .init_mem_type = &qxl_init_mem_type, |
390 | .eviction_valuable = ttm_bo_eviction_valuable, | ||
390 | .evict_flags = &qxl_evict_flags, | 391 | .evict_flags = &qxl_evict_flags, |
391 | .move = &qxl_bo_move, | 392 | .move = &qxl_bo_move, |
392 | .verify_access = &qxl_verify_access, | 393 | .verify_access = &qxl_verify_access, |
diff --git a/drivers/gpu/drm/radeon/atombios_encoders.c b/drivers/gpu/drm/radeon/atombios_encoders.c index 56bb758f4e33..fa4f8f008e4d 100644 --- a/drivers/gpu/drm/radeon/atombios_encoders.c +++ b/drivers/gpu/drm/radeon/atombios_encoders.c | |||
@@ -28,6 +28,7 @@ | |||
28 | #include <drm/radeon_drm.h> | 28 | #include <drm/radeon_drm.h> |
29 | #include "radeon.h" | 29 | #include "radeon.h" |
30 | #include "radeon_audio.h" | 30 | #include "radeon_audio.h" |
31 | #include "radeon_asic.h" | ||
31 | #include "atom.h" | 32 | #include "atom.h" |
32 | #include <linux/backlight.h> | 33 | #include <linux/backlight.h> |
33 | 34 | ||
diff --git a/drivers/gpu/drm/radeon/evergreen_cs.c b/drivers/gpu/drm/radeon/evergreen_cs.c index d960d3915408..f8b05090232a 100644 --- a/drivers/gpu/drm/radeon/evergreen_cs.c +++ b/drivers/gpu/drm/radeon/evergreen_cs.c | |||
@@ -27,6 +27,7 @@ | |||
27 | */ | 27 | */ |
28 | #include <drm/drmP.h> | 28 | #include <drm/drmP.h> |
29 | #include "radeon.h" | 29 | #include "radeon.h" |
30 | #include "radeon_asic.h" | ||
30 | #include "evergreend.h" | 31 | #include "evergreend.h" |
31 | #include "evergreen_reg_safe.h" | 32 | #include "evergreen_reg_safe.h" |
32 | #include "cayman_reg_safe.h" | 33 | #include "cayman_reg_safe.h" |
diff --git a/drivers/gpu/drm/radeon/r600_cs.c b/drivers/gpu/drm/radeon/r600_cs.c index b69c8de35bd3..595a19736458 100644 --- a/drivers/gpu/drm/radeon/r600_cs.c +++ b/drivers/gpu/drm/radeon/r600_cs.c | |||
@@ -28,6 +28,7 @@ | |||
28 | #include <linux/kernel.h> | 28 | #include <linux/kernel.h> |
29 | #include <drm/drmP.h> | 29 | #include <drm/drmP.h> |
30 | #include "radeon.h" | 30 | #include "radeon.h" |
31 | #include "radeon_asic.h" | ||
31 | #include "r600d.h" | 32 | #include "r600d.h" |
32 | #include "r600_reg_safe.h" | 33 | #include "r600_reg_safe.h" |
33 | 34 | ||
diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c index 5df3ec73021b..4134759a6823 100644 --- a/drivers/gpu/drm/radeon/radeon_atombios.c +++ b/drivers/gpu/drm/radeon/radeon_atombios.c | |||
@@ -29,6 +29,7 @@ | |||
29 | 29 | ||
30 | #include "atom.h" | 30 | #include "atom.h" |
31 | #include "atom-bits.h" | 31 | #include "atom-bits.h" |
32 | #include "radeon_asic.h" | ||
32 | 33 | ||
33 | extern void | 34 | extern void |
34 | radeon_add_atom_encoder(struct drm_device *dev, uint32_t encoder_enum, | 35 | radeon_add_atom_encoder(struct drm_device *dev, uint32_t encoder_enum, |
diff --git a/drivers/gpu/drm/radeon/radeon_clocks.c b/drivers/gpu/drm/radeon/radeon_clocks.c index 38e396dae0a9..c1135feb93c1 100644 --- a/drivers/gpu/drm/radeon/radeon_clocks.c +++ b/drivers/gpu/drm/radeon/radeon_clocks.c | |||
@@ -29,6 +29,7 @@ | |||
29 | #include <drm/radeon_drm.h> | 29 | #include <drm/radeon_drm.h> |
30 | #include "radeon_reg.h" | 30 | #include "radeon_reg.h" |
31 | #include "radeon.h" | 31 | #include "radeon.h" |
32 | #include "radeon_asic.h" | ||
32 | #include "atom.h" | 33 | #include "atom.h" |
33 | 34 | ||
34 | /* 10 khz */ | 35 | /* 10 khz */ |
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c index eb92aef46e3c..79c9b6f3f013 100644 --- a/drivers/gpu/drm/radeon/radeon_device.c +++ b/drivers/gpu/drm/radeon/radeon_device.c | |||
@@ -1651,7 +1651,10 @@ int radeon_suspend_kms(struct drm_device *dev, bool suspend, | |||
1651 | 1651 | ||
1652 | radeon_suspend(rdev); | 1652 | radeon_suspend(rdev); |
1653 | radeon_hpd_fini(rdev); | 1653 | radeon_hpd_fini(rdev); |
1654 | /* evict remaining vram memory */ | 1654 | /* evict remaining vram memory |
1655 | * This second call to evict vram is to evict the gart page table | ||
1656 | * using the CPU. | ||
1657 | */ | ||
1655 | radeon_bo_evict_vram(rdev); | 1658 | radeon_bo_evict_vram(rdev); |
1656 | 1659 | ||
1657 | radeon_agp_suspend(rdev); | 1660 | radeon_agp_suspend(rdev); |
diff --git a/drivers/gpu/drm/radeon/radeon_dp_mst.c b/drivers/gpu/drm/radeon/radeon_dp_mst.c index de504ea29c06..6d1237d6e1b8 100644 --- a/drivers/gpu/drm/radeon/radeon_dp_mst.c +++ b/drivers/gpu/drm/radeon/radeon_dp_mst.c | |||
@@ -223,7 +223,8 @@ radeon_dp_mst_mode_valid(struct drm_connector *connector, | |||
223 | return MODE_OK; | 223 | return MODE_OK; |
224 | } | 224 | } |
225 | 225 | ||
226 | struct drm_encoder *radeon_mst_best_encoder(struct drm_connector *connector) | 226 | static struct |
227 | drm_encoder *radeon_mst_best_encoder(struct drm_connector *connector) | ||
227 | { | 228 | { |
228 | struct radeon_connector *radeon_connector = to_radeon_connector(connector); | 229 | struct radeon_connector *radeon_connector = to_radeon_connector(connector); |
229 | 230 | ||
@@ -341,7 +342,8 @@ const struct drm_dp_mst_topology_cbs mst_cbs = { | |||
341 | .hotplug = radeon_dp_mst_hotplug, | 342 | .hotplug = radeon_dp_mst_hotplug, |
342 | }; | 343 | }; |
343 | 344 | ||
344 | struct radeon_connector *radeon_mst_find_connector(struct drm_encoder *encoder) | 345 | static struct |
346 | radeon_connector *radeon_mst_find_connector(struct drm_encoder *encoder) | ||
345 | { | 347 | { |
346 | struct drm_device *dev = encoder->dev; | 348 | struct drm_device *dev = encoder->dev; |
347 | struct drm_connector *connector; | 349 | struct drm_connector *connector; |
@@ -597,7 +599,7 @@ static const struct drm_encoder_helper_funcs radeon_mst_helper_funcs = { | |||
597 | .commit = radeon_mst_encoder_commit, | 599 | .commit = radeon_mst_encoder_commit, |
598 | }; | 600 | }; |
599 | 601 | ||
600 | void radeon_dp_mst_encoder_destroy(struct drm_encoder *encoder) | 602 | static void radeon_dp_mst_encoder_destroy(struct drm_encoder *encoder) |
601 | { | 603 | { |
602 | drm_encoder_cleanup(encoder); | 604 | drm_encoder_cleanup(encoder); |
603 | kfree(encoder); | 605 | kfree(encoder); |
diff --git a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c index 868c3ba2efaa..222a1fa41d7c 100644 --- a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c +++ b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c | |||
@@ -27,6 +27,7 @@ | |||
27 | #include <drm/drm_crtc_helper.h> | 27 | #include <drm/drm_crtc_helper.h> |
28 | #include <drm/radeon_drm.h> | 28 | #include <drm/radeon_drm.h> |
29 | #include "radeon.h" | 29 | #include "radeon.h" |
30 | #include "radeon_asic.h" | ||
30 | #include "atom.h" | 31 | #include "atom.h" |
31 | #include <linux/backlight.h> | 32 | #include <linux/backlight.h> |
32 | #ifdef CONFIG_PMAC_BACKLIGHT | 33 | #ifdef CONFIG_PMAC_BACKLIGHT |
diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c index 4b6542538ff9..326ad068c15a 100644 --- a/drivers/gpu/drm/radeon/radeon_pm.c +++ b/drivers/gpu/drm/radeon/radeon_pm.c | |||
@@ -47,6 +47,7 @@ static bool radeon_pm_in_vbl(struct radeon_device *rdev); | |||
47 | static bool radeon_pm_debug_check_in_vbl(struct radeon_device *rdev, bool finish); | 47 | static bool radeon_pm_debug_check_in_vbl(struct radeon_device *rdev, bool finish); |
48 | static void radeon_pm_update_profile(struct radeon_device *rdev); | 48 | static void radeon_pm_update_profile(struct radeon_device *rdev); |
49 | static void radeon_pm_set_clocks(struct radeon_device *rdev); | 49 | static void radeon_pm_set_clocks(struct radeon_device *rdev); |
50 | static void radeon_pm_compute_clocks_dpm(struct radeon_device *rdev); | ||
50 | 51 | ||
51 | int radeon_pm_get_type_index(struct radeon_device *rdev, | 52 | int radeon_pm_get_type_index(struct radeon_device *rdev, |
52 | enum radeon_pm_state_type ps_type, | 53 | enum radeon_pm_state_type ps_type, |
@@ -79,6 +80,8 @@ void radeon_pm_acpi_event_handler(struct radeon_device *rdev) | |||
79 | radeon_dpm_enable_bapm(rdev, rdev->pm.dpm.ac_power); | 80 | radeon_dpm_enable_bapm(rdev, rdev->pm.dpm.ac_power); |
80 | } | 81 | } |
81 | mutex_unlock(&rdev->pm.mutex); | 82 | mutex_unlock(&rdev->pm.mutex); |
83 | /* allow new DPM state to be picked */ | ||
84 | radeon_pm_compute_clocks_dpm(rdev); | ||
82 | } else if (rdev->pm.pm_method == PM_METHOD_PROFILE) { | 85 | } else if (rdev->pm.pm_method == PM_METHOD_PROFILE) { |
83 | if (rdev->pm.profile == PM_PROFILE_AUTO) { | 86 | if (rdev->pm.profile == PM_PROFILE_AUTO) { |
84 | mutex_lock(&rdev->pm.mutex); | 87 | mutex_lock(&rdev->pm.mutex); |
@@ -882,7 +885,8 @@ static struct radeon_ps *radeon_dpm_pick_power_state(struct radeon_device *rdev, | |||
882 | dpm_state = POWER_STATE_TYPE_INTERNAL_3DPERF; | 885 | dpm_state = POWER_STATE_TYPE_INTERNAL_3DPERF; |
883 | /* balanced states don't exist at the moment */ | 886 | /* balanced states don't exist at the moment */ |
884 | if (dpm_state == POWER_STATE_TYPE_BALANCED) | 887 | if (dpm_state == POWER_STATE_TYPE_BALANCED) |
885 | dpm_state = POWER_STATE_TYPE_PERFORMANCE; | 888 | dpm_state = rdev->pm.dpm.ac_power ? |
889 | POWER_STATE_TYPE_PERFORMANCE : POWER_STATE_TYPE_BATTERY; | ||
886 | 890 | ||
887 | restart_search: | 891 | restart_search: |
888 | /* Pick the best power state based on current conditions */ | 892 | /* Pick the best power state based on current conditions */ |
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c index 3de5e6e21662..0cf03ccbf0a7 100644 --- a/drivers/gpu/drm/radeon/radeon_ttm.c +++ b/drivers/gpu/drm/radeon/radeon_ttm.c | |||
@@ -863,6 +863,7 @@ static struct ttm_bo_driver radeon_bo_driver = { | |||
863 | .ttm_tt_unpopulate = &radeon_ttm_tt_unpopulate, | 863 | .ttm_tt_unpopulate = &radeon_ttm_tt_unpopulate, |
864 | .invalidate_caches = &radeon_invalidate_caches, | 864 | .invalidate_caches = &radeon_invalidate_caches, |
865 | .init_mem_type = &radeon_init_mem_type, | 865 | .init_mem_type = &radeon_init_mem_type, |
866 | .eviction_valuable = ttm_bo_eviction_valuable, | ||
866 | .evict_flags = &radeon_evict_flags, | 867 | .evict_flags = &radeon_evict_flags, |
867 | .move = &radeon_bo_move, | 868 | .move = &radeon_bo_move, |
868 | .verify_access = &radeon_verify_access, | 869 | .verify_access = &radeon_verify_access, |
diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c index e402be8821c4..143280dc0851 100644 --- a/drivers/gpu/drm/radeon/si.c +++ b/drivers/gpu/drm/radeon/si.c | |||
@@ -7858,7 +7858,7 @@ static void si_program_aspm(struct radeon_device *rdev) | |||
7858 | } | 7858 | } |
7859 | } | 7859 | } |
7860 | 7860 | ||
7861 | int si_vce_send_vcepll_ctlreq(struct radeon_device *rdev) | 7861 | static int si_vce_send_vcepll_ctlreq(struct radeon_device *rdev) |
7862 | { | 7862 | { |
7863 | unsigned i; | 7863 | unsigned i; |
7864 | 7864 | ||
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c index fc6217dfe401..31fcf11a2831 100644 --- a/drivers/gpu/drm/ttm/ttm_bo.c +++ b/drivers/gpu/drm/ttm/ttm_bo.c | |||
@@ -717,6 +717,20 @@ out: | |||
717 | return ret; | 717 | return ret; |
718 | } | 718 | } |
719 | 719 | ||
720 | bool ttm_bo_eviction_valuable(struct ttm_buffer_object *bo, | ||
721 | const struct ttm_place *place) | ||
722 | { | ||
723 | /* Don't evict this BO if it's outside of the | ||
724 | * requested placement range | ||
725 | */ | ||
726 | if (place->fpfn >= (bo->mem.start + bo->mem.size) || | ||
727 | (place->lpfn && place->lpfn <= bo->mem.start)) | ||
728 | return false; | ||
729 | |||
730 | return true; | ||
731 | } | ||
732 | EXPORT_SYMBOL(ttm_bo_eviction_valuable); | ||
733 | |||
720 | static int ttm_mem_evict_first(struct ttm_bo_device *bdev, | 734 | static int ttm_mem_evict_first(struct ttm_bo_device *bdev, |
721 | uint32_t mem_type, | 735 | uint32_t mem_type, |
722 | const struct ttm_place *place, | 736 | const struct ttm_place *place, |
@@ -731,21 +745,16 @@ static int ttm_mem_evict_first(struct ttm_bo_device *bdev, | |||
731 | spin_lock(&glob->lru_lock); | 745 | spin_lock(&glob->lru_lock); |
732 | list_for_each_entry(bo, &man->lru, lru) { | 746 | list_for_each_entry(bo, &man->lru, lru) { |
733 | ret = __ttm_bo_reserve(bo, false, true, NULL); | 747 | ret = __ttm_bo_reserve(bo, false, true, NULL); |
734 | if (!ret) { | 748 | if (ret) |
735 | if (place && (place->fpfn || place->lpfn)) { | 749 | continue; |
736 | /* Don't evict this BO if it's outside of the | ||
737 | * requested placement range | ||
738 | */ | ||
739 | if (place->fpfn >= (bo->mem.start + bo->mem.size) || | ||
740 | (place->lpfn && place->lpfn <= bo->mem.start)) { | ||
741 | __ttm_bo_unreserve(bo); | ||
742 | ret = -EBUSY; | ||
743 | continue; | ||
744 | } | ||
745 | } | ||
746 | 750 | ||
747 | break; | 751 | if (place && !bdev->driver->eviction_valuable(bo, place)) { |
752 | __ttm_bo_unreserve(bo); | ||
753 | ret = -EBUSY; | ||
754 | continue; | ||
748 | } | 755 | } |
756 | |||
757 | break; | ||
749 | } | 758 | } |
750 | 759 | ||
751 | if (ret) { | 760 | if (ret) { |
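
With the placement-range check factored out into ttm_bo_eviction_valuable() and the LRU loop consulting bdev->driver->eviction_valuable, a driver can veto evictions on its own criteria and still fall back to the stock check. A sketch, where my_bo_is_pinned() is a hypothetical driver predicate:

    static bool my_eviction_valuable(struct ttm_buffer_object *bo,
                                     const struct ttm_place *place)
    {
            /* Never evict BOs the driver has pinned (hypothetical). */
            if (my_bo_is_pinned(bo))
                    return false;

            /* Otherwise reuse the default placement-range test. */
            return ttm_bo_eviction_valuable(bo, place);
    }

    static struct ttm_bo_driver my_bo_driver = {
            /* ... other hooks ... */
            .eviction_valuable = my_eviction_valuable,
    };

Note that the refactored ttm_mem_evict_first() only consults the callback when a placement was supplied, so the default implementation's unconditional place dereference stays safe.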
diff --git a/drivers/gpu/drm/virtio/virtgpu_ttm.c b/drivers/gpu/drm/virtio/virtgpu_ttm.c index 80482ac5f95d..4a1de9f81193 100644 --- a/drivers/gpu/drm/virtio/virtgpu_ttm.c +++ b/drivers/gpu/drm/virtio/virtgpu_ttm.c | |||
@@ -425,6 +425,7 @@ static struct ttm_bo_driver virtio_gpu_bo_driver = { | |||
425 | .ttm_tt_unpopulate = &virtio_gpu_ttm_tt_unpopulate, | 425 | .ttm_tt_unpopulate = &virtio_gpu_ttm_tt_unpopulate, |
426 | .invalidate_caches = &virtio_gpu_invalidate_caches, | 426 | .invalidate_caches = &virtio_gpu_invalidate_caches, |
427 | .init_mem_type = &virtio_gpu_init_mem_type, | 427 | .init_mem_type = &virtio_gpu_init_mem_type, |
428 | .eviction_valuable = ttm_bo_eviction_valuable, | ||
428 | .evict_flags = &virtio_gpu_evict_flags, | 429 | .evict_flags = &virtio_gpu_evict_flags, |
429 | .move = &virtio_gpu_bo_move, | 430 | .move = &virtio_gpu_bo_move, |
430 | .verify_access = &virtio_gpu_verify_access, | 431 | .verify_access = &virtio_gpu_verify_access, |
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c b/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c index 78b75ee3c931..c894a48a74a6 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c | |||
@@ -849,6 +849,7 @@ struct ttm_bo_driver vmw_bo_driver = { | |||
849 | .ttm_tt_unpopulate = &vmw_ttm_unpopulate, | 849 | .ttm_tt_unpopulate = &vmw_ttm_unpopulate, |
850 | .invalidate_caches = vmw_invalidate_caches, | 850 | .invalidate_caches = vmw_invalidate_caches, |
851 | .init_mem_type = vmw_init_mem_type, | 851 | .init_mem_type = vmw_init_mem_type, |
852 | .eviction_valuable = ttm_bo_eviction_valuable, | ||
852 | .evict_flags = vmw_evict_flags, | 853 | .evict_flags = vmw_evict_flags, |
853 | .move = NULL, | 854 | .move = NULL, |
854 | .verify_access = vmw_verify_access, | 855 | .verify_access = vmw_verify_access, |
diff --git a/include/drm/ttm/ttm_bo_api.h b/include/drm/ttm/ttm_bo_api.h index 9eb940d6755f..bb6a3357a817 100644 --- a/include/drm/ttm/ttm_bo_api.h +++ b/include/drm/ttm/ttm_bo_api.h | |||
@@ -47,6 +47,8 @@ struct drm_mm_node; | |||
47 | 47 | ||
48 | struct ttm_placement; | 48 | struct ttm_placement; |
49 | 49 | ||
50 | struct ttm_place; | ||
51 | |||
50 | /** | 52 | /** |
51 | * struct ttm_bus_placement | 53 | * struct ttm_bus_placement |
52 | * | 54 | * |
@@ -396,6 +398,17 @@ extern void ttm_bo_unlock_delayed_workqueue(struct ttm_bo_device *bdev, | |||
396 | int resched); | 398 | int resched); |
397 | 399 | ||
398 | /** | 400 | /** |
401 | * ttm_bo_eviction_valuable | ||
402 | * | ||
403 | * @bo: The buffer object to evict | ||
404 | * @place: the placement we need to make room for | ||
405 | * | ||
406 | * Check if it is valuable to evict the BO to make room for the given placement. | ||
407 | */ | ||
408 | bool ttm_bo_eviction_valuable(struct ttm_buffer_object *bo, | ||
409 | const struct ttm_place *place); | ||
410 | |||
411 | /** | ||
399 | * ttm_bo_synccpu_write_grab | 412 | * ttm_bo_synccpu_write_grab |
400 | * | 413 | * |
401 | * @bo: The buffer object: | 414 | * @bo: The buffer object: |
diff --git a/include/drm/ttm/ttm_bo_driver.h b/include/drm/ttm/ttm_bo_driver.h index 4f0a92185995..d3d83dfe89e2 100644 --- a/include/drm/ttm/ttm_bo_driver.h +++ b/include/drm/ttm/ttm_bo_driver.h | |||
@@ -371,9 +371,21 @@ struct ttm_bo_driver { | |||
371 | * submission as a consequence. | 371 | * submission as a consequence. |
372 | */ | 372 | */ |
373 | 373 | ||
374 | int (*invalidate_caches) (struct ttm_bo_device *bdev, uint32_t flags); | 374 | int (*invalidate_caches)(struct ttm_bo_device *bdev, uint32_t flags); |
375 | int (*init_mem_type) (struct ttm_bo_device *bdev, uint32_t type, | 375 | int (*init_mem_type)(struct ttm_bo_device *bdev, uint32_t type, |
376 | struct ttm_mem_type_manager *man); | 376 | struct ttm_mem_type_manager *man); |
377 | |||
378 | /** | ||
379 | * struct ttm_bo_driver member eviction_valuable | ||
380 | * | ||
381 | * @bo: the buffer object to be evicted | ||
382 | * @place: placement we need room for | ||
383 | * | ||
384 | * Check with the driver if it is valuable to evict a BO to make room | ||
385 | * for a certain placement. | ||
386 | */ | ||
387 | bool (*eviction_valuable)(struct ttm_buffer_object *bo, | ||
388 | const struct ttm_place *place); | ||
377 | /** | 389 | /** |
378 | * struct ttm_bo_driver member evict_flags: | 390 | * struct ttm_bo_driver member evict_flags: |
379 | * | 391 | * |
@@ -384,8 +396,9 @@ struct ttm_bo_driver { | |||
384 | * finished, they'll end up in bo->mem.flags | 396 | * finished, they'll end up in bo->mem.flags |
385 | */ | 397 | */ |
386 | 398 | ||
387 | void(*evict_flags) (struct ttm_buffer_object *bo, | 399 | void (*evict_flags)(struct ttm_buffer_object *bo, |
388 | struct ttm_placement *placement); | 400 | struct ttm_placement *placement); |
401 | |||
389 | /** | 402 | /** |
390 | * struct ttm_bo_driver member move: | 403 | * struct ttm_bo_driver member move: |
391 | * | 404 | * |
@@ -399,10 +412,9 @@ struct ttm_bo_driver { | |||
399 | * | 412 | * |
400 | * Move a buffer between two memory regions. | 413 | * Move a buffer between two memory regions. |
401 | */ | 414 | */ |
402 | int (*move) (struct ttm_buffer_object *bo, | 415 | int (*move)(struct ttm_buffer_object *bo, bool evict, |
403 | bool evict, bool interruptible, | 416 | bool interruptible, bool no_wait_gpu, |
404 | bool no_wait_gpu, | 417 | struct ttm_mem_reg *new_mem); |
405 | struct ttm_mem_reg *new_mem); | ||
406 | 418 | ||
407 | /** | 419 | /** |
408 | * struct ttm_bo_driver_member verify_access | 420 | * struct ttm_bo_driver_member verify_access |
@@ -416,8 +428,8 @@ struct ttm_bo_driver { | |||
416 | * access for all buffer objects. | 428 | * access for all buffer objects. |
417 | * This function should return 0 if access is granted, -EPERM otherwise. | 429 | * This function should return 0 if access is granted, -EPERM otherwise. |
418 | */ | 430 | */ |
419 | int (*verify_access) (struct ttm_buffer_object *bo, | 431 | int (*verify_access)(struct ttm_buffer_object *bo, |
420 | struct file *filp); | 432 | struct file *filp); |
421 | 433 | ||
422 | /* hook to notify driver about a driver move so it | 434 | /* hook to notify driver about a driver move so it |
423 | * can do tiling things */ | 435 | * can do tiling things */ |
@@ -430,7 +442,7 @@ struct ttm_bo_driver { | |||
430 | /** | 442 | /** |
431 | * notify the driver that we're about to swap out this bo | 443 | * notify the driver that we're about to swap out this bo |
432 | */ | 444 | */ |
433 | void (*swap_notify) (struct ttm_buffer_object *bo); | 445 | void (*swap_notify)(struct ttm_buffer_object *bo); |
434 | 446 | ||
435 | /** | 447 | /** |
436 | * Driver callback on when mapping io memory (for bo_move_memcpy | 448 | * Driver callback on when mapping io memory (for bo_move_memcpy |
@@ -438,8 +450,10 @@ struct ttm_bo_driver { | |||
438 | * the mapping is not use anymore. io_mem_reserve & io_mem_free | 450 | * the mapping is not use anymore. io_mem_reserve & io_mem_free |
439 | * are balanced. | 451 | * are balanced. |
440 | */ | 452 | */ |
441 | int (*io_mem_reserve)(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem); | 453 | int (*io_mem_reserve)(struct ttm_bo_device *bdev, |
442 | void (*io_mem_free)(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem); | 454 | struct ttm_mem_reg *mem); |
455 | void (*io_mem_free)(struct ttm_bo_device *bdev, | ||
456 | struct ttm_mem_reg *mem); | ||
443 | 457 | ||
444 | /** | 458 | /** |
445 | * Optional driver callback for when BO is removed from the LRU. | 459 | * Optional driver callback for when BO is removed from the LRU. |
diff --git a/include/uapi/drm/amdgpu_drm.h b/include/uapi/drm/amdgpu_drm.h index d6b5a21f3d3c..4684f378f046 100644 --- a/include/uapi/drm/amdgpu_drm.h +++ b/include/uapi/drm/amdgpu_drm.h | |||
@@ -81,6 +81,8 @@ extern "C" { | |||
81 | #define AMDGPU_GEM_CREATE_VRAM_CLEARED (1 << 3) | 81 | #define AMDGPU_GEM_CREATE_VRAM_CLEARED (1 << 3) |
82 | /* Flag that create shadow bo(GTT) while allocating vram bo */ | 82 | /* Flag that create shadow bo(GTT) while allocating vram bo */ |
83 | #define AMDGPU_GEM_CREATE_SHADOW (1 << 4) | 83 | #define AMDGPU_GEM_CREATE_SHADOW (1 << 4) |
84 | /* Flag that allocating the BO should use linear VRAM */ | ||
85 | #define AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS (1 << 5) | ||
84 | 86 | ||
85 | struct drm_amdgpu_gem_create_in { | 87 | struct drm_amdgpu_gem_create_in { |
86 | /** the requested memory size */ | 88 | /** the requested memory size */ |
@@ -436,6 +438,7 @@ struct drm_amdgpu_cs_chunk_data { | |||
436 | * | 438 | * |
437 | */ | 439 | */ |
438 | #define AMDGPU_IDS_FLAGS_FUSION 0x1 | 440 | #define AMDGPU_IDS_FLAGS_FUSION 0x1 |
441 | #define AMDGPU_IDS_FLAGS_PREEMPTION 0x2 | ||
439 | 442 | ||
440 | /* indicate if acceleration can be working */ | 443 | /* indicate if acceleration can be working */ |
441 | #define AMDGPU_INFO_ACCEL_WORKING 0x00 | 444 | #define AMDGPU_INFO_ACCEL_WORKING 0x00 |
@@ -487,6 +490,10 @@ struct drm_amdgpu_cs_chunk_data { | |||
487 | #define AMDGPU_INFO_VIS_VRAM_USAGE 0x17 | 490 | #define AMDGPU_INFO_VIS_VRAM_USAGE 0x17 |
488 | /* number of TTM buffer evictions */ | 491 | /* number of TTM buffer evictions */ |
489 | #define AMDGPU_INFO_NUM_EVICTIONS 0x18 | 492 | #define AMDGPU_INFO_NUM_EVICTIONS 0x18 |
493 | /* Query memory about VRAM and GTT domains */ | ||
494 | #define AMDGPU_INFO_MEMORY 0x19 | ||
495 | /* Query vce clock table */ | ||
496 | #define AMDGPU_INFO_VCE_CLOCK_TABLE 0x1A | ||
490 | 497 | ||
491 | #define AMDGPU_INFO_MMR_SE_INDEX_SHIFT 0 | 498 | #define AMDGPU_INFO_MMR_SE_INDEX_SHIFT 0 |
492 | #define AMDGPU_INFO_MMR_SE_INDEX_MASK 0xff | 499 | #define AMDGPU_INFO_MMR_SE_INDEX_MASK 0xff |
@@ -572,6 +579,34 @@ struct drm_amdgpu_info_vram_gtt { | |||
572 | __u64 gtt_size; | 579 | __u64 gtt_size; |
573 | }; | 580 | }; |
574 | 581 | ||
582 | struct drm_amdgpu_heap_info { | ||
583 | /** max. physical memory */ | ||
584 | __u64 total_heap_size; | ||
585 | |||
586 | /** Theoretical max. available memory in the given heap */ | ||
587 | __u64 usable_heap_size; | ||
588 | |||
589 | /** | ||
590 | * Number of bytes allocated in the heap. This includes all processes | ||
591 | * and private allocations in the kernel. It changes when new buffers | ||
592 | * are allocated, freed, and moved. It cannot be larger than | ||
593 | * heap_size. | ||
594 | */ | ||
595 | __u64 heap_usage; | ||
596 | |||
597 | /** | ||
598 | * Theoretical possible max. size of buffer which | ||
599 | * could be allocated in the given heap | ||
600 | */ | ||
601 | __u64 max_allocation; | ||
602 | }; | ||
603 | |||
604 | struct drm_amdgpu_memory_info { | ||
605 | struct drm_amdgpu_heap_info vram; | ||
606 | struct drm_amdgpu_heap_info cpu_accessible_vram; | ||
607 | struct drm_amdgpu_heap_info gtt; | ||
608 | }; | ||
609 | |||
575 | struct drm_amdgpu_info_firmware { | 610 | struct drm_amdgpu_info_firmware { |
576 | __u32 ver; | 611 | __u32 ver; |
577 | __u32 feature; | 612 | __u32 feature; |
@@ -645,6 +680,24 @@ struct drm_amdgpu_info_hw_ip { | |||
645 | __u32 _pad; | 680 | __u32 _pad; |
646 | }; | 681 | }; |
647 | 682 | ||
683 | #define AMDGPU_VCE_CLOCK_TABLE_ENTRIES 6 | ||
684 | |||
685 | struct drm_amdgpu_info_vce_clock_table_entry { | ||
686 | /** System clock */ | ||
687 | __u32 sclk; | ||
688 | /** Memory clock */ | ||
689 | __u32 mclk; | ||
690 | /** VCE clock */ | ||
691 | __u32 eclk; | ||
692 | __u32 pad; | ||
693 | }; | ||
694 | |||
695 | struct drm_amdgpu_info_vce_clock_table { | ||
696 | struct drm_amdgpu_info_vce_clock_table_entry entries[AMDGPU_VCE_CLOCK_TABLE_ENTRIES]; | ||
697 | __u32 num_valid_entries; | ||
698 | __u32 pad; | ||
699 | }; | ||
700 | |||
648 | /* | 701 | /* |
649 | * Supported GPU families | 702 | * Supported GPU families |
650 | */ | 703 | */ |
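
Userspace reaches the new AMDGPU_INFO_MEMORY query through the existing info ioctl. A sketch, eliding error handling; the return_pointer/return_size/query fields of struct drm_amdgpu_info are part of this header but not shown in the hunks above, and the include path is an assumption that varies by setup:

    #include <stdint.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <drm/amdgpu_drm.h>   /* include path may vary by setup */

    static int query_memory_info(int fd, struct drm_amdgpu_memory_info *mem)
    {
            struct drm_amdgpu_info request;

            /* Point the kernel at our buffer and name the query. */
            memset(&request, 0, sizeof(request));
            request.return_pointer = (uint64_t)(uintptr_t)mem;
            request.return_size = sizeof(*mem);
            request.query = AMDGPU_INFO_MEMORY;

            return ioctl(fd, DRM_IOCTL_AMDGPU_INFO, &request);
    }

On success, mem->vram, mem->cpu_accessible_vram and mem->gtt carry the per-heap totals, usage and maximum allocation sizes defined above.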