path: root/drivers/gpu/drm/amd/amdgpu/amdgpu.h
author	Linus Torvalds <torvalds@linux-foundation.org>	2015-11-10 12:33:06 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2015-11-10 12:33:06 -0500
commit	3e82806b97398d542a5e03bd94861f79ce10ecee (patch)
tree	467753d23d422fc42a07992ac25cae7889e48c18 /drivers/gpu/drm/amd/amdgpu/amdgpu.h
parent	bd4f203e433387d39be404b67ad02acf6f76b7bc (diff)
parent	816d2206f0f9953ca854e4ff1a2749a5cbd62715 (diff)
Merge branch 'drm-next' of git://people.freedesktop.org/~airlied/linux
Pull drm updates from Dave Airlie:
 "I Was Almost Tempted To Capitalise Every Word, but then I decided I
  couldn't read it myself!

  I've also got one pull request for the sti driver outstanding.  It
  relied on a commit in Greg's tree and I didn't find out in time; that
  commit is in your tree now, so I might send that along once this is
  merged.

  I also had the accidental misfortune to have access to a Skylake on
  my desk for a few days, and I've had to encourage Intel to try
  harder, which seems to be happening now.

  Here is the main drm-next pull request for 4.4.

  Highlights:

  New driver:
   - vc4 driver for the Raspberry Pi VPU (from Eric Anholt at Broadcom)

  Core:
   - Atomic fbdev support
   - Atomic helpers for runtime pm
   - dp/aux i2c STATUS_UPDATE handling
   - struct_mutex usage cleanups
   - Generic OF probing support

  Documentation:
   - Kerneldoc for VGA switcheroo code
   - Rename to gpu instead of drm to reflect scope

  i915:
   - Skylake GuC firmware fixes
   - HPD A support
   - VBT backlight fallbacks
   - Fastboot by default for some systems
   - FBC work
   - BXT/SKL workarounds
   - Skylake deeper sleep state fixes

  amdgpu:
   - Enable GPU scheduler by default
   - New atombios opcodes
   - GPUVM debugging options
   - Stoney support
   - Fencing cleanups

  radeon:
   - More efficient CS checking

  nouveau:
   - gk20a instance memory handling improvements
   - Improved PGOB detection and GK107 support
   - Kepler GDDR5 PLL stability improvements
   - G8x/GT2xx reclock improvements
   - New userspace API compatibility fixes

  virtio-gpu:
   - Add 3D support - qemu 2.5 has it merged for its gtk backend

  msm:
   - Initial msm8996 (snapdragon 820) support

  exynos:
   - HDMI cleanups
   - Enable mixer driver by default
   - Add DECON-TV support

  vmwgfx:
   - Move to using memremap + fixes

  rcar-du:
   - Add support for R8A7793/4 DU

  armada:
   - Remove support for non-component mode
   - Improved plane handling
   - Power savings while in DPMS off

  tda998x:
   - Remove unused slave encoder support
   - Use more HDMI helpers
   - Fix EDID read handling

  dwhdmi:
   - Interlace video mode support for ipu-v3/dw_hdmi
   - Hotplug state fixes
   - Audio driver integration

  imx:
   - More color formats support

  tegra:
   - Minor fixes/improvements"

[ Merge fixup: remove unused variable 'dev' that had all uses removed
  in commit 4e270f088011: "drm/gem: Drop struct_mutex requirement from
  drm_gem_mmap_obj" ]

* 'drm-next' of git://people.freedesktop.org/~airlied/linux: (764 commits)
  drm/vmwgfx: Relax irq locking somewhat
  drm/vmwgfx: Properly flush cursor updates and page-flips
  drm/i915/skl: disable display side power well support for now
  drm/i915: Extend DSL readout fix to BDW and SKL.
  drm/i915: Do graphics device reset under forcewake
  drm/i915: Skip fence installation for objects with rotated views (v4)
  vga_switcheroo: Drop client power state VGA_SWITCHEROO_INIT
  drm/amdgpu: group together common fence implementation
  drm/amdgpu: remove AMDGPU_FENCE_OWNER_MOVE
  drm/amdgpu: remove now unused fence functions
  drm/amdgpu: fix fence fallback check
  drm/amdgpu: fix stoping the scheduler timeout
  drm/amdgpu: cleanup on error in amdgpu_cs_ioctl()
  drm/i915: Fix locking around GuC firmware load
  drm/amdgpu: update Fiji's Golden setting
  drm/amdgpu: update Fiji's rev id
  drm/amdgpu: extract common code in vi_common_early_init
  drm/amd/scheduler: don't oops on failure to load
  drm/amdgpu: don't oops on failure to load (v2)
  drm/amdgpu: don't VT switch on suspend
  ...
Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu.h')
-rw-r--r--	drivers/gpu/drm/amd/amdgpu/amdgpu.h	104
1 file changed, 31 insertions(+), 73 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index 0d13e6368b96..615ce6d464fb 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -79,6 +79,8 @@ extern int amdgpu_bapm;
 extern int amdgpu_deep_color;
 extern int amdgpu_vm_size;
 extern int amdgpu_vm_block_size;
+extern int amdgpu_vm_fault_stop;
+extern int amdgpu_vm_debug;
 extern int amdgpu_enable_scheduler;
 extern int amdgpu_sched_jobs;
 extern int amdgpu_sched_hw_submission;
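The two externs added here back new vm_fault_stop and vm_debug options. As a
rough sketch of how such integer knobs are normally exposed to users (the
parameter names, permissions and description strings below are assumptions
for illustration only; the real hookup lives in the driver's .c files, not in
this header):

    #include <linux/module.h>

    /* Hypothetical sketch: expose the globals declared above as module
     * parameters, readable under /sys/module/amdgpu/parameters/. */
    int amdgpu_vm_fault_stop;
    int amdgpu_vm_debug;

    MODULE_PARM_DESC(vm_fault_stop,
		     "Stop on VM fault (0 = never, 1 = on first fault, 2 = always)");
    module_param_named(vm_fault_stop, amdgpu_vm_fault_stop, int, 0444);

    MODULE_PARM_DESC(vm_debug, "Debug VM handling (0 = disabled, 1 = enabled)");
    module_param_named(vm_debug, amdgpu_vm_debug, int, 0644);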
@@ -343,7 +345,6 @@ struct amdgpu_ring_funcs {
 	/* testing functions */
 	int (*test_ring)(struct amdgpu_ring *ring);
 	int (*test_ib)(struct amdgpu_ring *ring);
-	bool (*is_lockup)(struct amdgpu_ring *ring);
 	/* insert NOP packets */
 	void (*insert_nop)(struct amdgpu_ring *ring, uint32_t count);
 };
@@ -404,7 +405,6 @@ struct amdgpu_fence_driver {
 /* some special values for the owner field */
 #define AMDGPU_FENCE_OWNER_UNDEFINED	((void*)0ul)
 #define AMDGPU_FENCE_OWNER_VM		((void*)1ul)
-#define AMDGPU_FENCE_OWNER_MOVE		((void*)2ul)
 
 #define AMDGPU_FENCE_FLAG_64BIT		(1 << 0)
 #define AMDGPU_FENCE_FLAG_INT		(1 << 1)
@@ -446,58 +446,11 @@ int amdgpu_fence_wait_next(struct amdgpu_ring *ring);
 int amdgpu_fence_wait_empty(struct amdgpu_ring *ring);
 unsigned amdgpu_fence_count_emitted(struct amdgpu_ring *ring);
 
-signed long amdgpu_fence_wait_any(struct amdgpu_device *adev,
-				  struct fence **array,
-				  uint32_t count,
-				  bool intr,
-				  signed long t);
-struct amdgpu_fence *amdgpu_fence_ref(struct amdgpu_fence *fence);
-void amdgpu_fence_unref(struct amdgpu_fence **fence);
-
 bool amdgpu_fence_need_sync(struct amdgpu_fence *fence,
 			    struct amdgpu_ring *ring);
 void amdgpu_fence_note_sync(struct amdgpu_fence *fence,
 			    struct amdgpu_ring *ring);
 
-static inline struct amdgpu_fence *amdgpu_fence_later(struct amdgpu_fence *a,
-						      struct amdgpu_fence *b)
-{
-	if (!a) {
-		return b;
-	}
-
-	if (!b) {
-		return a;
-	}
-
-	BUG_ON(a->ring != b->ring);
-
-	if (a->seq > b->seq) {
-		return a;
-	} else {
-		return b;
-	}
-}
-
-static inline bool amdgpu_fence_is_earlier(struct amdgpu_fence *a,
-					   struct amdgpu_fence *b)
-{
-	if (!a) {
-		return false;
-	}
-
-	if (!b) {
-		return true;
-	}
-
-	BUG_ON(a->ring != b->ring);
-
-	return a->seq < b->seq;
-}
-
-int amdgpu_user_fence_emit(struct amdgpu_ring *ring, struct amdgpu_user_fence *user,
-			   void *owner, struct amdgpu_fence **fence);
-
 /*
  * TTM.
  */
@@ -708,7 +661,7 @@ void amdgpu_semaphore_free(struct amdgpu_device *adev,
  */
 struct amdgpu_sync {
 	struct amdgpu_semaphore *semaphores[AMDGPU_NUM_SYNCS];
-	struct amdgpu_fence	*sync_to[AMDGPU_MAX_RINGS];
+	struct fence		*sync_to[AMDGPU_MAX_RINGS];
 	DECLARE_HASHTABLE(fences, 4);
 	struct fence		*last_vm_update;
 };
@@ -905,8 +858,6 @@ struct amdgpu_ring {
 	unsigned		ring_size;
 	unsigned		ring_free_dw;
 	int			count_dw;
-	atomic_t		last_rptr;
-	atomic64_t		last_activity;
 	uint64_t		gpu_addr;
 	uint32_t		align_mask;
 	uint32_t		ptr_mask;
@@ -960,6 +911,11 @@ struct amdgpu_ring {
 #define AMDGPU_PTE_FRAG_64KB	(4 << 7)
 #define AMDGPU_LOG2_PAGES_PER_FRAG 4
 
+/* How to programm VM fault handling */
+#define AMDGPU_VM_FAULT_STOP_NEVER	0
+#define AMDGPU_VM_FAULT_STOP_FIRST	1
+#define AMDGPU_VM_FAULT_STOP_ALWAYS	2
+
 struct amdgpu_vm_pt {
 	struct amdgpu_bo	*bo;
 	uint64_t		addr;
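The three AMDGPU_VM_FAULT_STOP_* values introduced above are the settings the
new amdgpu_vm_fault_stop option can take. A minimal sketch of the policy they
encode is shown below; the helper itself is hypothetical and only illustrates
how the values differ, it is not part of this patch:

    /* Illustrative only: map amdgpu_vm_fault_stop onto a stop/continue
     * decision when a GPU VM fault is observed. */
    static bool example_vm_fault_should_stop(bool first_fault)
    {
	    switch (amdgpu_vm_fault_stop) {
	    case AMDGPU_VM_FAULT_STOP_ALWAYS:
		    return true;		/* halt on every fault */
	    case AMDGPU_VM_FAULT_STOP_FIRST:
		    return first_fault;		/* halt only on the first fault */
	    case AMDGPU_VM_FAULT_STOP_NEVER:
	    default:
		    return false;		/* just log and keep going */
	    }
    }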
@@ -971,7 +927,7 @@ struct amdgpu_vm_id {
 	/* last flushed PD/PT update */
 	struct fence		*flushed_updates;
 	/* last use of vmid */
-	struct amdgpu_fence	*last_id_use;
+	struct fence		*last_id_use;
 };
 
 struct amdgpu_vm {
@@ -1004,7 +960,7 @@ struct amdgpu_vm {
 };
 
 struct amdgpu_vm_manager {
-	struct amdgpu_fence		*active[AMDGPU_NUM_VM];
+	struct fence			*active[AMDGPU_NUM_VM];
 	uint32_t			max_pfn;
 	/* number of VMIDs */
 	unsigned			nvm;
@@ -1223,8 +1179,6 @@ void amdgpu_ring_commit(struct amdgpu_ring *ring);
 void amdgpu_ring_unlock_commit(struct amdgpu_ring *ring);
 void amdgpu_ring_undo(struct amdgpu_ring *ring);
 void amdgpu_ring_unlock_undo(struct amdgpu_ring *ring);
-void amdgpu_ring_lockup_update(struct amdgpu_ring *ring);
-bool amdgpu_ring_test_lockup(struct amdgpu_ring *ring);
 unsigned amdgpu_ring_backup(struct amdgpu_ring *ring,
 			    uint32_t **data);
 int amdgpu_ring_restore(struct amdgpu_ring *ring,
@@ -1234,6 +1188,7 @@ int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
 		     struct amdgpu_irq_src *irq_src, unsigned irq_type,
 		     enum amdgpu_ring_type ring_type);
 void amdgpu_ring_fini(struct amdgpu_ring *ring);
+struct amdgpu_ring *amdgpu_ring_from_fence(struct fence *f);
 
 /*
  * CS.
@@ -1709,7 +1664,7 @@ struct amdgpu_vce {
 /*
  * SDMA
  */
-struct amdgpu_sdma {
+struct amdgpu_sdma_instance {
 	/* SDMA firmware */
 	const struct firmware	*fw;
 	uint32_t		fw_version;
@@ -1719,6 +1674,13 @@ struct amdgpu_sdma {
 	bool			burst_nop;
 };
 
+struct amdgpu_sdma {
+	struct amdgpu_sdma_instance instance[AMDGPU_MAX_SDMA_INSTANCES];
+	struct amdgpu_irq_src	trap_irq;
+	struct amdgpu_irq_src	illegal_inst_irq;
+	int			num_instances;
+};
+
 /*
  * Firmware
  */
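The new amdgpu_sdma container groups the per-engine state together with the
shared SDMA interrupt sources, and its num_instances field replaces
compile-time AMDGPU_MAX_SDMA_INSTANCES loops with a runtime count. A small
sketch of how callers walk the instances after this change (the dump helper
is hypothetical; only the fields it touches come from the structures above):

    /* Illustrative only: per-instance state is now reached through
     * adev->sdma.instance[i], bounded by adev->sdma.num_instances. */
    static void example_dump_sdma_fw_versions(struct amdgpu_device *adev)
    {
	    int i;

	    for (i = 0; i < adev->sdma.num_instances; i++)
		    dev_info(adev->dev, "SDMA%d firmware version %u\n",
			     i, adev->sdma.instance[i].fw_version);
    }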
@@ -1751,11 +1713,11 @@ void amdgpu_test_syncing(struct amdgpu_device *adev);
 int amdgpu_mn_register(struct amdgpu_bo *bo, unsigned long addr);
 void amdgpu_mn_unregister(struct amdgpu_bo *bo);
 #else
-static int amdgpu_mn_register(struct amdgpu_bo *bo, unsigned long addr)
+static inline int amdgpu_mn_register(struct amdgpu_bo *bo, unsigned long addr)
 {
 	return -ENODEV;
 }
-static void amdgpu_mn_unregister(struct amdgpu_bo *bo) {}
+static inline void amdgpu_mn_unregister(struct amdgpu_bo *bo) {}
 #endif
 
 /*
@@ -1947,7 +1909,6 @@ struct amdgpu_device {
 	struct device			*dev;
 	struct drm_device		*ddev;
 	struct pci_dev			*pdev;
-	struct rw_semaphore		exclusive_lock;
 
 	/* ASIC */
 	enum amd_asic_type		asic_type;
@@ -1961,7 +1922,6 @@ struct amdgpu_device {
 	bool				suspend;
 	bool				need_dma32;
 	bool				accel_working;
-	bool				needs_reset;
 	struct work_struct		reset_work;
 	struct notifier_block		acpi_nb;
 	struct amdgpu_i2c_chan		*i2c_bus[AMDGPU_MAX_I2C_BUS];
@@ -2065,9 +2025,7 @@ struct amdgpu_device {
 	struct amdgpu_gfx		gfx;
 
 	/* sdma */
-	struct amdgpu_sdma		sdma[AMDGPU_MAX_SDMA_INSTANCES];
-	struct amdgpu_irq_src		sdma_trap_irq;
-	struct amdgpu_irq_src		sdma_illegal_inst_irq;
+	struct amdgpu_sdma		sdma;
 
 	/* uvd */
 	bool				has_uvd;
@@ -2204,17 +2162,18 @@ static inline void amdgpu_ring_write(struct amdgpu_ring *ring, uint32_t v)
 	ring->ring_free_dw--;
 }
 
-static inline struct amdgpu_sdma * amdgpu_get_sdma_instance(struct amdgpu_ring *ring)
+static inline struct amdgpu_sdma_instance *
+amdgpu_get_sdma_instance(struct amdgpu_ring *ring)
 {
 	struct amdgpu_device *adev = ring->adev;
 	int i;
 
-	for (i = 0; i < AMDGPU_MAX_SDMA_INSTANCES; i++)
-		if (&adev->sdma[i].ring == ring)
+	for (i = 0; i < adev->sdma.num_instances; i++)
+		if (&adev->sdma.instance[i].ring == ring)
 			break;
 
 	if (i < AMDGPU_MAX_SDMA_INSTANCES)
-		return &adev->sdma[i];
+		return &adev->sdma.instance[i];
 	else
 		return NULL;
 }
@@ -2241,7 +2200,6 @@ static inline struct amdgpu_sdma * amdgpu_get_sdma_instance(struct amdgpu_ring *
 #define amdgpu_ring_parse_cs(r, p, ib) ((r)->funcs->parse_cs((p), (ib)))
 #define amdgpu_ring_test_ring(r) (r)->funcs->test_ring((r))
 #define amdgpu_ring_test_ib(r) (r)->funcs->test_ib((r))
-#define amdgpu_ring_is_lockup(r) (r)->funcs->is_lockup((r))
 #define amdgpu_ring_get_rptr(r) (r)->funcs->get_rptr((r))
 #define amdgpu_ring_get_wptr(r) (r)->funcs->get_wptr((r))
 #define amdgpu_ring_set_wptr(r) (r)->funcs->set_wptr((r))
@@ -2350,10 +2308,10 @@ void amdgpu_driver_preclose_kms(struct drm_device *dev,
 				struct drm_file *file_priv);
 int amdgpu_suspend_kms(struct drm_device *dev, bool suspend, bool fbcon);
 int amdgpu_resume_kms(struct drm_device *dev, bool resume, bool fbcon);
-u32 amdgpu_get_vblank_counter_kms(struct drm_device *dev, int crtc);
-int amdgpu_enable_vblank_kms(struct drm_device *dev, int crtc);
-void amdgpu_disable_vblank_kms(struct drm_device *dev, int crtc);
-int amdgpu_get_vblank_timestamp_kms(struct drm_device *dev, int crtc,
+u32 amdgpu_get_vblank_counter_kms(struct drm_device *dev, unsigned int pipe);
+int amdgpu_enable_vblank_kms(struct drm_device *dev, unsigned int pipe);
+void amdgpu_disable_vblank_kms(struct drm_device *dev, unsigned int pipe);
+int amdgpu_get_vblank_timestamp_kms(struct drm_device *dev, unsigned int pipe,
 				    int *max_error,
 				    struct timeval *vblank_time,
 				    unsigned flags);