author		Dave Airlie <airlied@redhat.com>	2017-06-15 19:54:02 -0400
committer	Dave Airlie <airlied@redhat.com>	2017-06-15 19:56:53 -0400
commit		04d4fb5fa63876d8e7cf67f2788aecfafc6a28a7 (patch)
tree		92aec67d7b5a1359baff1a508d381234f046743e /drivers/gpu/drm/amd/amdgpu/amdgpu.h
parent		bfda9aa15317838ddb259406027ef9911a1dffbc (diff)
parent		a1924005a2e9bfcc4e217b4acd0a4f2421969040 (diff)
Merge branch 'drm-next-4.13' of git://people.freedesktop.org/~agd5f/linux into drm-next
New radeon and amdgpu features for 4.13:

- Lots of Vega10 bug fixes
- Preliminary Raven support
- KIQ support for compute rings
- MEC queue management rework from Andres
- Audio support for DCE6
- SR-IOV improvements
- Improved module parameters for controlling radeon vs amdgpu support
  for SI and CIK
- Bug fixes
- General code cleanups

[airlied: the drmP.h header dropped from one file was still needed, and the build broke]

* 'drm-next-4.13' of git://people.freedesktop.org/~agd5f/linux: (362 commits)
  drm/amdgpu: Fix compiler warnings
  drm/amdgpu: vm_update_ptes remove code duplication
  drm/amd/amdgpu: Port VCN over to new SOC15 macros
  drm/amd/amdgpu: Port PSP v10.0 over to new SOC15 macros
  drm/amd/amdgpu: Port PSP v3.1 over to new SOC15 macros
  drm/amd/amdgpu: Port NBIO v7.0 driver over to new SOC15 macros
  drm/amd/amdgpu: Port NBIO v6.1 driver over to new SOC15 macros
  drm/amd/amdgpu: Port UVD 7.0 over to new SOC15 macros
  drm/amd/amdgpu: Port MMHUB over to new SOC15 macros
  drm/amd/amdgpu: Cleanup gfxhub read-modify-write patterns
  drm/amd/amdgpu: Port GFXHUB over to new SOC15 macros
  drm/amd/amdgpu: Add offset variant to SOC15 macros
  drm/amd/powerplay: add avfs control for Vega10
  drm/amdgpu: add virtual display support for raven
  drm/amdgpu/gfx9: fix compute ring doorbell index
  drm/amd/amdgpu: Rename KIQ ring to avoid spaces
  drm/amd/amdgpu: gfx9 tidy ups (v2)
  drm/amdgpu: add contiguous flag in ucode bo create
  drm/amdgpu: fix missed gpu info firmware when cache firmware during S3
  drm/amdgpu: export test ib debugfs interface
  ...
Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu.h')
-rw-r--r--	drivers/gpu/drm/amd/amdgpu/amdgpu.h	146
1 file changed, 108 insertions(+), 38 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index 77ff68f9932b..e0adad590ecb 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -46,6 +46,8 @@
 #include <drm/drm_gem.h>
 #include <drm/amdgpu_drm.h>
 
+#include <kgd_kfd_interface.h>
+
 #include "amd_shared.h"
 #include "amdgpu_mode.h"
 #include "amdgpu_ih.h"
@@ -62,6 +64,7 @@
 #include "amdgpu_acp.h"
 #include "amdgpu_uvd.h"
 #include "amdgpu_vce.h"
+#include "amdgpu_vcn.h"
 
 #include "gpu_scheduler.h"
 #include "amdgpu_virt.h"
@@ -92,6 +95,7 @@ extern int amdgpu_vm_size;
 extern int amdgpu_vm_block_size;
 extern int amdgpu_vm_fault_stop;
 extern int amdgpu_vm_debug;
+extern int amdgpu_vm_update_mode;
 extern int amdgpu_sched_jobs;
 extern int amdgpu_sched_hw_submission;
 extern int amdgpu_no_evict;
@@ -109,6 +113,15 @@ extern int amdgpu_prim_buf_per_se;
 extern int amdgpu_pos_buf_per_se;
 extern int amdgpu_cntl_sb_buf_per_se;
 extern int amdgpu_param_buf_per_se;
+extern int amdgpu_job_hang_limit;
+extern int amdgpu_lbpw;
+
+#ifdef CONFIG_DRM_AMDGPU_SI
+extern int amdgpu_si_support;
+#endif
+#ifdef CONFIG_DRM_AMDGPU_CIK
+extern int amdgpu_cik_support;
+#endif
 
 #define AMDGPU_DEFAULT_GTT_SIZE_MB	3072ULL /* 3GB by default */
 #define AMDGPU_WAIT_IDLE_TIMEOUT_IN_MS	3000
@@ -305,8 +318,8 @@ struct amdgpu_gart_funcs {
 	/* set pte flags based per asic */
 	uint64_t (*get_vm_pte_flags)(struct amdgpu_device *adev,
 				     uint32_t flags);
-	/* adjust mc addr in fb for APU case */
-	u64 (*adjust_mc_addr)(struct amdgpu_device *adev, u64 addr);
+	/* get the pde for a given mc addr */
+	u64 (*get_vm_pde)(struct amdgpu_device *adev, u64 addr);
 	uint32_t (*get_invalidate_req)(unsigned int vm_id);
 };
 
@@ -554,7 +567,7 @@ int amdgpu_gart_table_vram_pin(struct amdgpu_device *adev);
 void amdgpu_gart_table_vram_unpin(struct amdgpu_device *adev);
 int amdgpu_gart_init(struct amdgpu_device *adev);
 void amdgpu_gart_fini(struct amdgpu_device *adev);
-void amdgpu_gart_unbind(struct amdgpu_device *adev, uint64_t offset,
+int amdgpu_gart_unbind(struct amdgpu_device *adev, uint64_t offset,
 			int pages);
 int amdgpu_gart_bind(struct amdgpu_device *adev, uint64_t offset,
 		     int pages, struct page **pagelist,
@@ -602,6 +615,7 @@ struct amdgpu_mc {
 	uint32_t		srbm_soft_reset;
 	struct amdgpu_mode_mc_save save;
 	bool			prt_warning;
+	uint64_t		stolen_size;
 	/* apertures */
 	u64			shared_aperture_start;
 	u64			shared_aperture_end;
@@ -772,6 +786,29 @@ int amdgpu_job_submit(struct amdgpu_job *job, struct amdgpu_ring *ring,
 		      struct dma_fence **f);
 
 /*
+ * Queue manager
+ */
+struct amdgpu_queue_mapper {
+	int		hw_ip;
+	struct mutex	lock;
+	/* protected by lock */
+	struct amdgpu_ring *queue_map[AMDGPU_MAX_RINGS];
+};
+
+struct amdgpu_queue_mgr {
+	struct amdgpu_queue_mapper mapper[AMDGPU_MAX_IP_NUM];
+};
+
+int amdgpu_queue_mgr_init(struct amdgpu_device *adev,
+			  struct amdgpu_queue_mgr *mgr);
+int amdgpu_queue_mgr_fini(struct amdgpu_device *adev,
+			  struct amdgpu_queue_mgr *mgr);
+int amdgpu_queue_mgr_map(struct amdgpu_device *adev,
+			 struct amdgpu_queue_mgr *mgr,
+			 int hw_ip, int instance, int ring,
+			 struct amdgpu_ring **out_ring);
+
+/*
  * context related structures
  */
 
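Note: the queue manager introduced above is what replaces the removed amdgpu_cs_get_ring() later in this diff: submissions address a queue by an (hw_ip, instance, ring) triple, and amdgpu_queue_mgr_map() resolves and caches the backing amdgpu_ring per context. A minimal illustrative sketch (error handling elided; the ctx variable and the ring index are assumptions, not part of this patch):

	struct amdgpu_ring *out_ring;
	int r;

	/* Resolve user-space compute ring 2 on instance 0 to a HW ring. */
	r = amdgpu_queue_mgr_map(adev, &ctx->queue_mgr,
				 AMDGPU_HW_IP_COMPUTE, 0, 2, &out_ring);
	if (r)
		return r;	/* invalid hw_ip/instance/ring triple */
	/* out_ring is now usable for command submission on this ctx. */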
@@ -784,6 +821,7 @@ struct amdgpu_ctx_ring {
 struct amdgpu_ctx {
 	struct kref		refcount;
 	struct amdgpu_device	*adev;
+	struct amdgpu_queue_mgr queue_mgr;
 	unsigned		reset_counter;
 	spinlock_t		ring_lock;
 	struct dma_fence	**fences;
@@ -822,6 +860,7 @@ struct amdgpu_fpriv {
 	struct mutex		bo_list_lock;
 	struct idr		bo_list_handles;
 	struct amdgpu_ctx_mgr	ctx_mgr;
+	u32			vram_lost_counter;
 };
 
 /*
@@ -893,20 +932,26 @@ struct amdgpu_rlc {
 	u32 *register_restore;
 };
 
+#define AMDGPU_MAX_COMPUTE_QUEUES KGD_MAX_QUEUES
+
 struct amdgpu_mec {
 	struct amdgpu_bo	*hpd_eop_obj;
 	u64			hpd_eop_gpu_addr;
 	struct amdgpu_bo	*mec_fw_obj;
 	u64			mec_fw_gpu_addr;
-	u32 num_pipe;
 	u32 num_mec;
-	u32 num_queue;
+	u32 num_pipe_per_mec;
+	u32 num_queue_per_pipe;
 	void			*mqd_backup[AMDGPU_MAX_COMPUTE_RINGS + 1];
+
+	/* These are the resources for which amdgpu takes ownership */
+	DECLARE_BITMAP(queue_bitmap, AMDGPU_MAX_COMPUTE_QUEUES);
 };
 
 struct amdgpu_kiq {
 	u64			eop_gpu_addr;
 	struct amdgpu_bo	*eop_obj;
+	struct mutex		ring_mutex;
 	struct amdgpu_ring	ring;
 	struct amdgpu_irq_src	irq;
 };
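Note: queue_bitmap is a flat bitmap across all MEC queues (mec x pipe x queue); set bits mark queues amdgpu owns, cleared bits are left for amdkfd. A hypothetical walk over the owned queues with the standard kernel bitmap iterator, decoding the flat index back to (mec, pipe, queue) using the per-mec/per-pipe counts above (the decode order is an assumption for illustration):

	unsigned int bit, mec, pipe, queue;

	for_each_set_bit(bit, adev->gfx.mec.queue_bitmap,
			 AMDGPU_MAX_COMPUTE_QUEUES) {
		queue = bit % adev->gfx.mec.num_queue_per_pipe;
		pipe = (bit / adev->gfx.mec.num_queue_per_pipe)
		       % adev->gfx.mec.num_pipe_per_mec;
		mec = (bit / adev->gfx.mec.num_queue_per_pipe)
		      / adev->gfx.mec.num_pipe_per_mec;
		/* ... program the queue at (mec, pipe, queue) ... */
	}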
@@ -983,7 +1028,10 @@ struct amdgpu_gfx_config {
 struct amdgpu_cu_info {
 	uint32_t number; /* total active CU number */
 	uint32_t ao_cu_mask;
+	uint32_t max_waves_per_simd;
 	uint32_t wave_front_size;
+	uint32_t max_scratch_slots_per_cu;
+	uint32_t lds_size;
 	uint32_t bitmap[4][4];
 };
 
@@ -1061,6 +1109,8 @@ struct amdgpu_gfx {
 	uint32_t			grbm_soft_reset;
 	uint32_t			srbm_soft_reset;
 	bool				in_reset;
+	/* s3/s4 mask */
+	bool				in_suspend;
 	/* NGG */
 	struct amdgpu_ngg		ngg;
 };
@@ -1114,7 +1164,6 @@ struct amdgpu_cs_parser {
 #define AMDGPU_PREAMBLE_IB_PRESENT          (1 << 0) /* bit set means command submit involves a preamble IB */
 #define AMDGPU_PREAMBLE_IB_PRESENT_FIRST    (1 << 1) /* bit set means preamble IB is first presented in belonging context */
 #define AMDGPU_HAVE_CTX_SWITCH              (1 << 2) /* bit set means context switch occured */
-#define AMDGPU_VM_DOMAIN                    (1 << 3) /* bit set means in virtual memory context */
 
 struct amdgpu_job {
 	struct amd_sched_job    base;
@@ -1122,6 +1171,8 @@ struct amdgpu_job {
 	struct amdgpu_vm	*vm;
 	struct amdgpu_ring	*ring;
 	struct amdgpu_sync	sync;
+	struct amdgpu_sync	dep_sync;
+	struct amdgpu_sync	sched_sync;
 	struct amdgpu_ib	*ibs;
 	struct dma_fence	*fence; /* the hw fence */
 	uint32_t		preamble_status;
@@ -1129,7 +1180,6 @@ struct amdgpu_job {
 	void			*owner;
 	uint64_t		fence_ctx; /* the fence_context this job uses */
 	bool			vm_needs_flush;
-	bool			need_pipeline_sync;
 	unsigned		vm_id;
 	uint64_t		vm_pd_addr;
 	uint32_t		gds_base, gds_size;
@@ -1221,6 +1271,9 @@ struct amdgpu_firmware {
 	const struct amdgpu_psp_funcs *funcs;
 	struct amdgpu_bo *rbuf;
 	struct mutex mutex;
+
+	/* gpu info firmware data pointer */
+	const struct firmware *gpu_info_fw;
 };
 
 /*
@@ -1296,7 +1349,6 @@ struct amdgpu_smumgr {
 */
 struct amdgpu_allowed_register_entry {
 	uint32_t reg_offset;
-	bool untouched;
 	bool grbm_indexed;
 };
 
@@ -1424,6 +1476,7 @@ typedef void (*amdgpu_wreg_t)(struct amdgpu_device*, uint32_t, uint32_t);
 typedef uint32_t (*amdgpu_block_rreg_t)(struct amdgpu_device*, uint32_t, uint32_t);
 typedef void (*amdgpu_block_wreg_t)(struct amdgpu_device*, uint32_t, uint32_t, uint32_t);
 
+#define AMDGPU_RESET_MAGIC_NUM 64
 struct amdgpu_device {
 	struct device			*dev;
 	struct drm_device		*ddev;
@@ -1523,7 +1576,9 @@ struct amdgpu_device {
 	atomic64_t			gtt_usage;
 	atomic64_t			num_bytes_moved;
 	atomic64_t			num_evictions;
+	atomic64_t			num_vram_cpu_page_faults;
 	atomic_t			gpu_reset_counter;
+	atomic_t			vram_lost_counter;
 
 	/* data for buffer migration throttling */
 	struct {
@@ -1570,11 +1625,18 @@ struct amdgpu_device {
 	/* sdma */
 	struct amdgpu_sdma		sdma;
 
-	/* uvd */
-	struct amdgpu_uvd		uvd;
+	union {
+		struct {
+			/* uvd */
+			struct amdgpu_uvd	uvd;
+
+			/* vce */
+			struct amdgpu_vce	vce;
+		};
 
-	/* vce */
-	struct amdgpu_vce		vce;
+		/* vcn */
+		struct amdgpu_vcn	vcn;
+	};
 
 	/* firmwares */
 	struct amdgpu_firmware		firmware;
@@ -1598,6 +1660,9 @@ struct amdgpu_device {
 	/* amdkfd interface */
 	struct kfd_dev		*kfd;
 
+	/* delayed work_func for deferring clockgating during resume */
+	struct delayed_work	late_init_work;
+
 	struct amdgpu_virt	virt;
 
 	/* link all shadow bo */
@@ -1606,9 +1671,13 @@ struct amdgpu_device {
 	/* link all gtt */
 	spinlock_t			gtt_list_lock;
 	struct list_head		gtt_list;
+	/* keep an lru list of rings by HW IP */
+	struct list_head		ring_lru_list;
+	spinlock_t			ring_lru_list_lock;
 
 	/* record hw reset is performed */
 	bool has_hw_reset;
+	u8				reset_magic[AMDGPU_RESET_MAGIC_NUM];
 
 };
 
@@ -1617,7 +1686,6 @@ static inline struct amdgpu_device *amdgpu_ttm_adev(struct ttm_bo_device *bdev)
 	return container_of(bdev, struct amdgpu_device, mman.bdev);
 }
 
-bool amdgpu_device_is_px(struct drm_device *dev);
 int amdgpu_device_init(struct amdgpu_device *adev,
 		       struct drm_device *ddev,
 		       struct pci_dev *pdev,
@@ -1733,30 +1801,31 @@ static inline void amdgpu_ring_write_multiple(struct amdgpu_ring *ring, void *sr
 	unsigned occupied, chunk1, chunk2;
 	void *dst;
 
-	if (ring->count_dw < count_dw) {
+	if (unlikely(ring->count_dw < count_dw)) {
 		DRM_ERROR("amdgpu: writing more dwords to the ring than expected!\n");
-	} else {
-		occupied = ring->wptr & ring->buf_mask;
-		dst = (void *)&ring->ring[occupied];
-		chunk1 = ring->buf_mask + 1 - occupied;
-		chunk1 = (chunk1 >= count_dw) ? count_dw: chunk1;
-		chunk2 = count_dw - chunk1;
-		chunk1 <<= 2;
-		chunk2 <<= 2;
-
-		if (chunk1)
-			memcpy(dst, src, chunk1);
-
-		if (chunk2) {
-			src += chunk1;
-			dst = (void *)ring->ring;
-			memcpy(dst, src, chunk2);
-		}
-
-		ring->wptr += count_dw;
-		ring->wptr &= ring->ptr_mask;
-		ring->count_dw -= count_dw;
+		return;
+	}
+
+	occupied = ring->wptr & ring->buf_mask;
+	dst = (void *)&ring->ring[occupied];
+	chunk1 = ring->buf_mask + 1 - occupied;
+	chunk1 = (chunk1 >= count_dw) ? count_dw: chunk1;
+	chunk2 = count_dw - chunk1;
+	chunk1 <<= 2;
+	chunk2 <<= 2;
+
+	if (chunk1)
+		memcpy(dst, src, chunk1);
+
+	if (chunk2) {
+		src += chunk1;
+		dst = (void *)ring->ring;
+		memcpy(dst, src, chunk2);
 	}
+
+	ring->wptr += count_dw;
+	ring->wptr &= ring->ptr_mask;
+	ring->count_dw -= count_dw;
 }
 
 static inline struct amdgpu_sdma_instance *
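Note: the rework above only flattens the control flow (early return on overflow instead of an else block); the copy logic is unchanged. The write is split into chunk1, which fills up to the end of the ring buffer, and chunk2, which wraps to the start. A worked example under assumed values, to make the wrap-around arithmetic concrete:

	/* Assume a 256-dword ring (buf_mask = 0xff), wptr = 250, count_dw = 10:
	 *
	 *   occupied = 250 & 0xff   = 250
	 *   chunk1   = 256 - 250    = 6 dwords -> 24 bytes at &ring[250]
	 *   chunk2   = 10 - 6       = 4 dwords -> 16 bytes at &ring[0]
	 *
	 * 6 dwords land at the tail of the buffer, 4 wrap to the head,
	 * and wptr advances to (250 + 10) & ptr_mask.
	 */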
@@ -1792,6 +1861,7 @@ amdgpu_get_sdma_instance(struct amdgpu_ring *ring)
 #define amdgpu_asic_get_config_memsize(adev) (adev)->asic_funcs->get_config_memsize((adev))
 #define amdgpu_gart_flush_gpu_tlb(adev, vmid) (adev)->gart.gart_funcs->flush_gpu_tlb((adev), (vmid))
 #define amdgpu_gart_set_pte_pde(adev, pt, idx, addr, flags) (adev)->gart.gart_funcs->set_pte_pde((adev), (pt), (idx), (addr), (flags))
+#define amdgpu_gart_get_vm_pde(adev, addr) (adev)->gart.gart_funcs->get_vm_pde((adev), (addr))
 #define amdgpu_vm_copy_pte(adev, ib, pe, src, count) ((adev)->vm_manager.vm_pte_funcs->copy_pte((ib), (pe), (src), (count)))
 #define amdgpu_vm_write_pte(adev, ib, pe, value, count, incr) ((adev)->vm_manager.vm_pte_funcs->write_pte((ib), (pe), (value), (count), (incr)))
 #define amdgpu_vm_set_pte_pde(adev, ib, pe, addr, count, incr, flags) ((adev)->vm_manager.vm_pte_funcs->set_pte_pde((ib), (pe), (addr), (count), (incr), (flags)))
@@ -1813,6 +1883,7 @@ amdgpu_get_sdma_instance(struct amdgpu_ring *ring)
 #define amdgpu_ring_emit_cntxcntl(r, d) (r)->funcs->emit_cntxcntl((r), (d))
 #define amdgpu_ring_emit_rreg(r, d) (r)->funcs->emit_rreg((r), (d))
 #define amdgpu_ring_emit_wreg(r, d, v) (r)->funcs->emit_wreg((r), (d), (v))
+#define amdgpu_ring_emit_tmz(r, b) (r)->funcs->emit_tmz((r), (b))
 #define amdgpu_ring_pad_ib(r, ib) ((r)->funcs->pad_ib((r), (ib)))
 #define amdgpu_ring_init_cond_exec(r) (r)->funcs->init_cond_exec((r))
 #define amdgpu_ring_patch_cond_exec(r,o) (r)->funcs->patch_cond_exec((r),(o))
@@ -1849,9 +1920,6 @@ bool amdgpu_need_post(struct amdgpu_device *adev);
 void amdgpu_update_display_priority(struct amdgpu_device *adev);
 
 int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data);
-int amdgpu_cs_get_ring(struct amdgpu_device *adev, u32 ip_type,
-		       u32 ip_instance, u32 ring,
-		       struct amdgpu_ring **out_ring);
 void amdgpu_cs_report_moved_bytes(struct amdgpu_device *adev, u64 num_bytes);
 void amdgpu_ttm_placement_from_domain(struct amdgpu_bo *abo, u32 domain);
 bool amdgpu_ttm_bo_is_amdgpu_bo(struct ttm_buffer_object *bo);
@@ -1900,6 +1968,8 @@ static inline bool amdgpu_has_atpx(void) { return false; }
 extern const struct drm_ioctl_desc amdgpu_ioctls_kms[];
 extern const int amdgpu_max_kms_ioctl;
 
+bool amdgpu_kms_vram_lost(struct amdgpu_device *adev,
+			  struct amdgpu_fpriv *fpriv);
 int amdgpu_driver_load_kms(struct drm_device *dev, unsigned long flags);
 void amdgpu_driver_unload_kms(struct drm_device *dev);
 void amdgpu_driver_lastclose_kms(struct drm_device *dev);
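Note: the two vram_lost counters (the per-device atomic added to struct amdgpu_device and the per-fpriv snapshot added to struct amdgpu_fpriv) let the driver tell a client that VRAM contents were lost across a GPU reset. A plausible sketch of the check the new amdgpu_kms_vram_lost() helper performs, assuming the fpriv value is snapshotted when the file is opened (not confirmed by this header alone):

	bool amdgpu_kms_vram_lost(struct amdgpu_device *adev,
				  struct amdgpu_fpriv *fpriv)
	{
		/* VRAM is lost for this client if the device counter
		 * has moved since this fpriv captured it.
		 */
		return fpriv->vram_lost_counter !=
			atomic_read(&adev->vram_lost_counter);
	}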