Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu.h')
 drivers/gpu/drm/amd/amdgpu/amdgpu.h | 227 ++++++++++++++---------------
 1 file changed, 92 insertions(+), 135 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index 6647fb26ef25..5a5f04d0902d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -79,6 +79,8 @@ extern int amdgpu_bapm;
 extern int amdgpu_deep_color;
 extern int amdgpu_vm_size;
 extern int amdgpu_vm_block_size;
+extern int amdgpu_vm_fault_stop;
+extern int amdgpu_vm_debug;
 extern int amdgpu_enable_scheduler;
 extern int amdgpu_sched_jobs;
 extern int amdgpu_sched_hw_submission;
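The two new integers are presumably wired up as module parameters in amdgpu_drv.c. A minimal sketch of the driver's usual pattern; the permission bits and MODULE_PARM_DESC wording are assumptions, not part of this hunk:

/* Hedged sketch of the usual amdgpu module-parameter wiring. */
int amdgpu_vm_fault_stop;
module_param_named(vm_fault_stop, amdgpu_vm_fault_stop, int, 0444);
MODULE_PARM_DESC(vm_fault_stop,
		 "Stop on VM fault (0 = never (default), 1 = on first fault, 2 = always)");

int amdgpu_vm_debug;
module_param_named(vm_debug, amdgpu_vm_debug, int, 0444);
MODULE_PARM_DESC(vm_debug, "Debug VM handling (0 = disabled (default), 1 = enabled)");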
@@ -343,7 +345,6 @@ struct amdgpu_ring_funcs {
 	/* testing functions */
 	int (*test_ring)(struct amdgpu_ring *ring);
 	int (*test_ib)(struct amdgpu_ring *ring);
-	bool (*is_lockup)(struct amdgpu_ring *ring);
 	/* insert NOP packets */
 	void (*insert_nop)(struct amdgpu_ring *ring, uint32_t count);
 };
@@ -388,7 +389,6 @@ struct amdgpu_clock {
  * Fences.
  */
 struct amdgpu_fence_driver {
-	struct amdgpu_ring		*ring;
 	uint64_t			gpu_addr;
 	volatile uint32_t		*cpu_addr;
 	/* sync_seq is protected by ring emission lock */
@@ -397,14 +397,13 @@ struct amdgpu_fence_driver {
 	bool				initialized;
 	struct amdgpu_irq_src		*irq_src;
 	unsigned			irq_type;
-	struct delayed_work		lockup_work;
+	struct timer_list		fallback_timer;
 	wait_queue_head_t		fence_queue;
 };
 
 /* some special values for the owner field */
 #define AMDGPU_FENCE_OWNER_UNDEFINED	((void*)0ul)
 #define AMDGPU_FENCE_OWNER_VM		((void*)1ul)
-#define AMDGPU_FENCE_OWNER_MOVE		((void*)2ul)
 
 #define AMDGPU_FENCE_FLAG_64BIT		(1 << 0)
 #define AMDGPU_FENCE_FLAG_INT		(1 << 1)
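Replacing the lockup_work delayed work with a bare timer_list gives fence processing a lightweight safety net for missed fence interrupts. A minimal sketch of how such a fallback is typically armed with the timer API of this kernel generation; the handler name and AMDGPU_FENCE_JIFFIES_TIMEOUT follow the driver's conventions but are assumptions here, not taken from this diff:

/* Hedged sketch, not the verified amdgpu_fence.c code. */
static void amdgpu_fence_fallback(unsigned long arg)
{
	struct amdgpu_ring *ring = (void *)arg;

	amdgpu_fence_process(ring);	/* poll the fence sequence by hand */
}

/* at ring init: */
setup_timer(&ring->fence_drv.fallback_timer, amdgpu_fence_fallback,
	    (unsigned long)ring);

/* re-armed whenever a fence is emitted: */
mod_timer(&ring->fence_drv.fallback_timer,
	  jiffies + AMDGPU_FENCE_JIFFIES_TIMEOUT);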
@@ -446,58 +445,11 @@ int amdgpu_fence_wait_next(struct amdgpu_ring *ring);
 int amdgpu_fence_wait_empty(struct amdgpu_ring *ring);
 unsigned amdgpu_fence_count_emitted(struct amdgpu_ring *ring);
 
-signed long amdgpu_fence_wait_any(struct amdgpu_device *adev,
-				  struct fence **array,
-				  uint32_t count,
-				  bool intr,
-				  signed long t);
-struct amdgpu_fence *amdgpu_fence_ref(struct amdgpu_fence *fence);
-void amdgpu_fence_unref(struct amdgpu_fence **fence);
-
 bool amdgpu_fence_need_sync(struct amdgpu_fence *fence,
 			    struct amdgpu_ring *ring);
 void amdgpu_fence_note_sync(struct amdgpu_fence *fence,
 			    struct amdgpu_ring *ring);
 
-static inline struct amdgpu_fence *amdgpu_fence_later(struct amdgpu_fence *a,
-						      struct amdgpu_fence *b)
-{
-	if (!a) {
-		return b;
-	}
-
-	if (!b) {
-		return a;
-	}
-
-	BUG_ON(a->ring != b->ring);
-
-	if (a->seq > b->seq) {
-		return a;
-	} else {
-		return b;
-	}
-}
-
-static inline bool amdgpu_fence_is_earlier(struct amdgpu_fence *a,
-					   struct amdgpu_fence *b)
-{
-	if (!a) {
-		return false;
-	}
-
-	if (!b) {
-		return true;
-	}
-
-	BUG_ON(a->ring != b->ring);
-
-	return a->seq < b->seq;
-}
-
-int amdgpu_user_fence_emit(struct amdgpu_ring *ring, struct amdgpu_user_fence *user,
-			   void *owner, struct amdgpu_fence **fence);
-
 /*
  * TTM.
  */
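The removed amdgpu_fence_later()/amdgpu_fence_is_earlier() helpers ordered driver-private sequence numbers; once everything is a struct fence, the common fence_is_later() helper from linux/fence.h answers the same question. A hedged equivalent of the removed logic, with the same same-context requirement the old BUG_ON enforced:

/* Sketch only: fence_is_later() is only meaningful for fences from
 * the same context, matching the removed a->ring != b->ring check. */
static inline struct fence *pick_later(struct fence *a, struct fence *b)
{
	if (!a)
		return b;
	if (!b)
		return a;
	return fence_is_later(a, b) ? a : b;
}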
@@ -544,6 +496,7 @@ struct amdgpu_bo_va_mapping {
 
 /* bo virtual addresses in a specific vm */
 struct amdgpu_bo_va {
+	struct mutex			mutex;
 	/* protected by bo being reserved */
 	struct list_head		bo_list;
 	struct fence			*last_pt_update;
@@ -586,6 +539,7 @@ struct amdgpu_bo {
 	/* Constant after initialization */
 	struct amdgpu_device		*adev;
 	struct drm_gem_object		gem_base;
+	struct amdgpu_bo		*parent;
 
 	struct ttm_bo_kmap_obj		dma_buf_vmap;
 	pid_t				pid;
@@ -708,7 +662,7 @@ void amdgpu_semaphore_free(struct amdgpu_device *adev,
  */
 struct amdgpu_sync {
 	struct amdgpu_semaphore *semaphores[AMDGPU_NUM_SYNCS];
-	struct amdgpu_fence	*sync_to[AMDGPU_MAX_RINGS];
+	struct fence		*sync_to[AMDGPU_MAX_RINGS];
 	DECLARE_HASHTABLE(fences, 4);
 	struct fence		*last_vm_update;
 };
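Widening sync_to[] from amdgpu_fence to plain fence pointers lets the sync object track scheduler fences as well as ring fences. A hedged sketch of the per-ring bookkeeping this enables; illustrative only, the real amdgpu_sync_fence() body may differ:

/* Keep whichever fence signals later for a given ring slot. */
static void sync_keep_later(struct fence **keep, struct fence *f)
{
	if (*keep && fence_is_later(*keep, f))
		return;			/* stored fence already signals later */

	fence_put(*keep);
	*keep = fence_get(f);
}

/* usage: sync_keep_later(&sync->sync_to[ring->idx], fence); */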
@@ -905,8 +859,6 @@ struct amdgpu_ring {
 	unsigned		ring_size;
 	unsigned		ring_free_dw;
 	int			count_dw;
-	atomic_t		last_rptr;
-	atomic64_t		last_activity;
 	uint64_t		gpu_addr;
 	uint32_t		align_mask;
 	uint32_t		ptr_mask;
@@ -960,9 +912,14 @@ struct amdgpu_ring {
 #define AMDGPU_PTE_FRAG_64KB	(4 << 7)
 #define AMDGPU_LOG2_PAGES_PER_FRAG 4
 
+/* How to program VM fault handling */
+#define AMDGPU_VM_FAULT_STOP_NEVER	0
+#define AMDGPU_VM_FAULT_STOP_FIRST	1
+#define AMDGPU_VM_FAULT_STOP_ALWAYS	2
+
 struct amdgpu_vm_pt {
 	struct amdgpu_bo	*bo;
 	uint64_t		addr;
 };
 
 struct amdgpu_vm_id {
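A hedged illustration of how a memory-controller fault handler might act on these values together with the amdgpu_vm_fault_stop parameter; the handler shape and the faulted latch are invented for illustration, only the defines and the parameter come from this patch:

/* Illustrative only, not the actual gmc interrupt handler. */
static void handle_vm_fault(struct amdgpu_device *adev)
{
	static bool faulted;	/* hypothetical "first fault seen" latch */

	switch (amdgpu_vm_fault_stop) {
	case AMDGPU_VM_FAULT_STOP_FIRST:
		if (!faulted)
			WARN_ON(1);	/* stand-in for halting on the fault */
		faulted = true;
		break;
	case AMDGPU_VM_FAULT_STOP_ALWAYS:
		WARN_ON(1);
		break;
	case AMDGPU_VM_FAULT_STOP_NEVER:
	default:
		break;			/* log the fault and keep going */
	}
}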
@@ -970,13 +927,9 @@ struct amdgpu_vm_id {
 	uint64_t		pd_gpu_addr;
 	/* last flushed PD/PT update */
 	struct fence		*flushed_updates;
-	/* last use of vmid */
-	struct amdgpu_fence	*last_id_use;
 };
 
 struct amdgpu_vm {
-	struct mutex		mutex;
-
 	struct rb_root		va;
 
 	/* protecting invalidated */
@@ -1001,24 +954,72 @@ struct amdgpu_vm {
 
 	/* for id and flush management per ring */
 	struct amdgpu_vm_id	ids[AMDGPU_MAX_RINGS];
+	/* for interval tree */
+	spinlock_t		it_lock;
+	/* protecting freed */
+	spinlock_t		freed_lock;
 };
 
 struct amdgpu_vm_manager {
-	struct amdgpu_fence		*active[AMDGPU_NUM_VM];
-	uint32_t			max_pfn;
+	struct {
+		struct fence		*active;
+		atomic_long_t		owner;
+	} ids[AMDGPU_NUM_VM];
+
+	uint32_t			max_pfn;
 	/* number of VMIDs */
 	unsigned			nvm;
 	/* vram base address for page table entry */
 	u64				vram_base_offset;
 	/* is vm enabled? */
 	bool				enabled;
-	/* for hw to save the PD addr on suspend/resume */
-	uint32_t			saved_table_addr[AMDGPU_NUM_VM];
 	/* vm pte handling */
 	const struct amdgpu_vm_pte_funcs	*vm_pte_funcs;
 	struct amdgpu_ring			*vm_pte_funcs_ring;
 };
 
+void amdgpu_vm_manager_fini(struct amdgpu_device *adev);
+int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm);
+void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm);
+struct amdgpu_bo_list_entry *amdgpu_vm_get_bos(struct amdgpu_device *adev,
+					       struct amdgpu_vm *vm,
+					       struct list_head *head);
+int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
+		      struct amdgpu_sync *sync);
+void amdgpu_vm_flush(struct amdgpu_ring *ring,
+		     struct amdgpu_vm *vm,
+		     struct fence *updates);
+void amdgpu_vm_fence(struct amdgpu_device *adev,
+		     struct amdgpu_vm *vm,
+		     struct fence *fence);
+uint64_t amdgpu_vm_map_gart(struct amdgpu_device *adev, uint64_t addr);
+int amdgpu_vm_update_page_directory(struct amdgpu_device *adev,
+				    struct amdgpu_vm *vm);
+int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
+			  struct amdgpu_vm *vm);
+int amdgpu_vm_clear_invalids(struct amdgpu_device *adev, struct amdgpu_vm *vm,
+			     struct amdgpu_sync *sync);
+int amdgpu_vm_bo_update(struct amdgpu_device *adev,
+			struct amdgpu_bo_va *bo_va,
+			struct ttm_mem_reg *mem);
+void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev,
+			     struct amdgpu_bo *bo);
+struct amdgpu_bo_va *amdgpu_vm_bo_find(struct amdgpu_vm *vm,
+				       struct amdgpu_bo *bo);
+struct amdgpu_bo_va *amdgpu_vm_bo_add(struct amdgpu_device *adev,
+				      struct amdgpu_vm *vm,
+				      struct amdgpu_bo *bo);
+int amdgpu_vm_bo_map(struct amdgpu_device *adev,
+		     struct amdgpu_bo_va *bo_va,
+		     uint64_t addr, uint64_t offset,
+		     uint64_t size, uint32_t flags);
+int amdgpu_vm_bo_unmap(struct amdgpu_device *adev,
+		       struct amdgpu_bo_va *bo_va,
+		       uint64_t addr);
+void amdgpu_vm_bo_rmv(struct amdgpu_device *adev,
+		      struct amdgpu_bo_va *bo_va);
+int amdgpu_vm_free_job(struct amdgpu_job *job);
+
 /*
  * context related structures
  */
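Taken together, the relocated declarations describe the per-submission VM flow. A hedged sketch of the typical call order from the CS path; the ordering is inferred from the function names, and vm, ring, sync, adev and updates are assumed locals from the surrounding submission context:

/* Hedged sketch: typical ordering of the VM helpers around a submission,
 * error handling and locking elided. */
int r;

r = amdgpu_vm_grab_id(vm, ring, &sync);		/* reserve a VMID      */
if (r)
	return r;
r = amdgpu_vm_update_page_directory(adev, vm);	/* commit PD changes   */
if (r)
	return r;
r = amdgpu_vm_clear_freed(adev, vm);		/* drop stale mappings */
if (r)
	return r;
r = amdgpu_vm_clear_invalids(adev, vm, &sync);	/* revalidate PTEs     */
if (r)
	return r;
amdgpu_vm_flush(ring, vm, updates);		/* emit the VM flush   */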
@@ -1223,8 +1224,6 @@ void amdgpu_ring_commit(struct amdgpu_ring *ring);
 void amdgpu_ring_unlock_commit(struct amdgpu_ring *ring);
 void amdgpu_ring_undo(struct amdgpu_ring *ring);
 void amdgpu_ring_unlock_undo(struct amdgpu_ring *ring);
-void amdgpu_ring_lockup_update(struct amdgpu_ring *ring);
-bool amdgpu_ring_test_lockup(struct amdgpu_ring *ring);
 unsigned amdgpu_ring_backup(struct amdgpu_ring *ring,
 			    uint32_t **data);
 int amdgpu_ring_restore(struct amdgpu_ring *ring,
@@ -1234,6 +1233,7 @@ int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
 		     struct amdgpu_irq_src *irq_src, unsigned irq_type,
 		     enum amdgpu_ring_type ring_type);
 void amdgpu_ring_fini(struct amdgpu_ring *ring);
+struct amdgpu_ring *amdgpu_ring_from_fence(struct fence *f);
 
 /*
  * CS.
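The new amdgpu_ring_from_fence() has to map both raw ring fences and GPU-scheduler fences back to their owning ring. Its body lives in amdgpu_ring.c; this is a hedged reconstruction of the likely shape, assuming the driver's existing to_amdgpu_fence/to_amd_sched_fence converters and an embedded scheduler in the ring:

/* Hedged sketch, not the verified implementation. */
struct amdgpu_ring *amdgpu_ring_from_fence(struct fence *f)
{
	struct amd_sched_fence *s_fence = to_amd_sched_fence(f);
	struct amdgpu_fence *a_fence;

	if (s_fence)	/* scheduler fence: walk back via its scheduler */
		return container_of(s_fence->sched, struct amdgpu_ring, sched);

	a_fence = to_amdgpu_fence(f);
	if (a_fence)	/* plain ring fence */
		return a_fence->ring;

	return NULL;
}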
@@ -1256,6 +1256,7 @@ struct amdgpu_cs_parser {
 	/* relocations */
 	struct amdgpu_bo_list_entry	*vm_bos;
 	struct list_head	validated;
+	struct fence		*fence;
 
 	struct amdgpu_ib	*ibs;
 	uint32_t		num_ibs;
@@ -1271,7 +1272,7 @@ struct amdgpu_job {
 	struct amdgpu_device	*adev;
 	struct amdgpu_ib	*ibs;
 	uint32_t		num_ibs;
-	struct mutex		job_lock;
+	void			*owner;
 	struct amdgpu_user_fence uf;
 	int (*free_job)(struct amdgpu_job *job);
 };
@@ -1654,6 +1655,7 @@ struct amdgpu_pm {
 	u8			fan_max_rpm;
 	/* dpm */
 	bool			dpm_enabled;
+	bool			sysfs_initialized;
 	struct amdgpu_dpm	dpm;
 	const struct firmware	*fw;	/* SMC firmware */
 	uint32_t		fw_version;
@@ -1708,7 +1710,7 @@ struct amdgpu_vce {
 /*
  * SDMA
  */
-struct amdgpu_sdma {
+struct amdgpu_sdma_instance {
 	/* SDMA firmware */
 	const struct firmware	*fw;
 	uint32_t		fw_version;
@@ -1718,6 +1720,13 @@ struct amdgpu_sdma {
 	bool			burst_nop;
 };
 
+struct amdgpu_sdma {
+	struct amdgpu_sdma_instance instance[AMDGPU_MAX_SDMA_INSTANCES];
+	struct amdgpu_irq_src	trap_irq;
+	struct amdgpu_irq_src	illegal_inst_irq;
+	int			num_instances;
+};
+
 /*
  * Firmware
  */
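With all per-engine state gathered under a single adev->sdma container (note the matching amdgpu_get_sdma_instance() change near the end of this diff), loops can bound themselves by the real instance count instead of the compile-time maximum. A minimal sketch of the resulting pattern; the teardown body is illustrative:

/* Illustrative iteration over the new layout. */
int i;

for (i = 0; i < adev->sdma.num_instances; i++)
	amdgpu_ring_fini(&adev->sdma.instance[i].ring);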
@@ -1750,11 +1759,11 @@ void amdgpu_test_syncing(struct amdgpu_device *adev);
 int amdgpu_mn_register(struct amdgpu_bo *bo, unsigned long addr);
 void amdgpu_mn_unregister(struct amdgpu_bo *bo);
 #else
-static int amdgpu_mn_register(struct amdgpu_bo *bo, unsigned long addr)
+static inline int amdgpu_mn_register(struct amdgpu_bo *bo, unsigned long addr)
 {
 	return -ENODEV;
 }
-static void amdgpu_mn_unregister(struct amdgpu_bo *bo) {}
+static inline void amdgpu_mn_unregister(struct amdgpu_bo *bo) {}
 #endif
 
 /*
@@ -1946,7 +1955,6 @@ struct amdgpu_device {
 	struct device			*dev;
 	struct drm_device		*ddev;
 	struct pci_dev			*pdev;
-	struct rw_semaphore		exclusive_lock;
 
 	/* ASIC */
 	enum amd_asic_type		asic_type;
@@ -1960,7 +1968,6 @@ struct amdgpu_device {
 	bool				suspend;
 	bool				need_dma32;
 	bool				accel_working;
-	bool				needs_reset;
 	struct work_struct		reset_work;
 	struct notifier_block		acpi_nb;
 	struct amdgpu_i2c_chan		*i2c_bus[AMDGPU_MAX_I2C_BUS];
@@ -2064,9 +2071,7 @@ struct amdgpu_device {
 	struct amdgpu_gfx		gfx;
 
 	/* sdma */
-	struct amdgpu_sdma		sdma[AMDGPU_MAX_SDMA_INSTANCES];
-	struct amdgpu_irq_src		sdma_trap_irq;
-	struct amdgpu_irq_src		sdma_illegal_inst_irq;
+	struct amdgpu_sdma		sdma;
 
 	/* uvd */
 	bool				has_uvd;
@@ -2203,17 +2208,18 @@ static inline void amdgpu_ring_write(struct amdgpu_ring *ring, uint32_t v)
 	ring->ring_free_dw--;
 }
 
-static inline struct amdgpu_sdma * amdgpu_get_sdma_instance(struct amdgpu_ring *ring)
+static inline struct amdgpu_sdma_instance *
+amdgpu_get_sdma_instance(struct amdgpu_ring *ring)
 {
 	struct amdgpu_device *adev = ring->adev;
 	int i;
 
-	for (i = 0; i < AMDGPU_MAX_SDMA_INSTANCES; i++)
-		if (&adev->sdma[i].ring == ring)
+	for (i = 0; i < adev->sdma.num_instances; i++)
+		if (&adev->sdma.instance[i].ring == ring)
 			break;
 
 	if (i < AMDGPU_MAX_SDMA_INSTANCES)
-		return &adev->sdma[i];
+		return &adev->sdma.instance[i];
 	else
 		return NULL;
 }
@@ -2240,7 +2246,6 @@ static inline struct amdgpu_sdma * amdgpu_get_sdma_instance(struct amdgpu_ring *
 #define amdgpu_ring_parse_cs(r, p, ib) ((r)->funcs->parse_cs((p), (ib)))
 #define amdgpu_ring_test_ring(r) (r)->funcs->test_ring((r))
 #define amdgpu_ring_test_ib(r) (r)->funcs->test_ib((r))
-#define amdgpu_ring_is_lockup(r) (r)->funcs->is_lockup((r))
 #define amdgpu_ring_get_rptr(r) (r)->funcs->get_rptr((r))
 #define amdgpu_ring_get_wptr(r) (r)->funcs->get_wptr((r))
 #define amdgpu_ring_set_wptr(r) (r)->funcs->set_wptr((r))
@@ -2298,11 +2303,6 @@ void amdgpu_pci_config_reset(struct amdgpu_device *adev);
 bool amdgpu_card_posted(struct amdgpu_device *adev);
 void amdgpu_update_display_priority(struct amdgpu_device *adev);
 bool amdgpu_boot_test_post_card(struct amdgpu_device *adev);
-struct amdgpu_cs_parser *amdgpu_cs_parser_create(struct amdgpu_device *adev,
-						 struct drm_file *filp,
-						 struct amdgpu_ctx *ctx,
-						 struct amdgpu_ib *ibs,
-						 uint32_t num_ibs);
 
 int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data);
 int amdgpu_cs_get_ring(struct amdgpu_device *adev, u32 ip_type,
@@ -2349,10 +2349,10 @@ void amdgpu_driver_preclose_kms(struct drm_device *dev,
 			       struct drm_file *file_priv);
 int amdgpu_suspend_kms(struct drm_device *dev, bool suspend, bool fbcon);
 int amdgpu_resume_kms(struct drm_device *dev, bool resume, bool fbcon);
-u32 amdgpu_get_vblank_counter_kms(struct drm_device *dev, int crtc);
-int amdgpu_enable_vblank_kms(struct drm_device *dev, int crtc);
-void amdgpu_disable_vblank_kms(struct drm_device *dev, int crtc);
-int amdgpu_get_vblank_timestamp_kms(struct drm_device *dev, int crtc,
+u32 amdgpu_get_vblank_counter_kms(struct drm_device *dev, unsigned int pipe);
+int amdgpu_enable_vblank_kms(struct drm_device *dev, unsigned int pipe);
+void amdgpu_disable_vblank_kms(struct drm_device *dev, unsigned int pipe);
+int amdgpu_get_vblank_timestamp_kms(struct drm_device *dev, unsigned int pipe,
 				    int *max_error,
 				    struct timeval *vblank_time,
 				    unsigned flags);
@@ -2360,49 +2360,6 @@ long amdgpu_kms_compat_ioctl(struct file *filp, unsigned int cmd,
 			     unsigned long arg);
 
 /*
- * vm
- */
-int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm);
-void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm);
-struct amdgpu_bo_list_entry *amdgpu_vm_get_bos(struct amdgpu_device *adev,
-					       struct amdgpu_vm *vm,
-					       struct list_head *head);
-int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
-		      struct amdgpu_sync *sync);
-void amdgpu_vm_flush(struct amdgpu_ring *ring,
-		     struct amdgpu_vm *vm,
-		     struct fence *updates);
-void amdgpu_vm_fence(struct amdgpu_device *adev,
-		     struct amdgpu_vm *vm,
-		     struct amdgpu_fence *fence);
-uint64_t amdgpu_vm_map_gart(struct amdgpu_device *adev, uint64_t addr);
-int amdgpu_vm_update_page_directory(struct amdgpu_device *adev,
-				    struct amdgpu_vm *vm);
-int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
-			  struct amdgpu_vm *vm);
-int amdgpu_vm_clear_invalids(struct amdgpu_device *adev,
-			     struct amdgpu_vm *vm, struct amdgpu_sync *sync);
-int amdgpu_vm_bo_update(struct amdgpu_device *adev,
-			struct amdgpu_bo_va *bo_va,
-			struct ttm_mem_reg *mem);
-void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev,
-			     struct amdgpu_bo *bo);
-struct amdgpu_bo_va *amdgpu_vm_bo_find(struct amdgpu_vm *vm,
-				       struct amdgpu_bo *bo);
-struct amdgpu_bo_va *amdgpu_vm_bo_add(struct amdgpu_device *adev,
-				      struct amdgpu_vm *vm,
-				      struct amdgpu_bo *bo);
-int amdgpu_vm_bo_map(struct amdgpu_device *adev,
-		     struct amdgpu_bo_va *bo_va,
-		     uint64_t addr, uint64_t offset,
-		     uint64_t size, uint32_t flags);
-int amdgpu_vm_bo_unmap(struct amdgpu_device *adev,
-		       struct amdgpu_bo_va *bo_va,
-		       uint64_t addr);
-void amdgpu_vm_bo_rmv(struct amdgpu_device *adev,
-		      struct amdgpu_bo_va *bo_va);
-int amdgpu_vm_free_job(struct amdgpu_job *job);
-/*
  * functions used by amdgpu_encoder.c
  */
 struct amdgpu_afmt_acr {