author    Dave Airlie <airlied@redhat.com>    2015-11-04 19:57:25 -0500
committer Dave Airlie <airlied@redhat.com>    2015-11-04 19:57:25 -0500
commit    793423ffcb229ae5654b382a1356906f81da2018
tree      c95dcf05246a9c44d9ce15f4d9fb1c4012598fe5  /drivers/gpu/drm/amd/amdgpu/amdgpu.h
parent    bf248ca1f5c7ba1e535ba4bd517a15a1ae965c69
parent    a95e264254dca5b6bfb331d5902930d0787bd7e1
Merge branch 'drm-next-4.4' of git://people.freedesktop.org/~agd5f/linux into drm-next
- Updated register headers for GFX 8.1 for Stoney
- Add some new CZ revisions
- minor pageflip optimizations
- Fencing clean up
- Warning fix
- More fence cleanup
- oops fix
- Fiji fixes

* 'drm-next-4.4' of git://people.freedesktop.org/~agd5f/linux: (29 commits)
  drm/amdgpu: group together common fence implementation
  drm/amdgpu: remove AMDGPU_FENCE_OWNER_MOVE
  drm/amdgpu: remove now unused fence functions
  drm/amdgpu: fix fence fallback check
  drm/amdgpu: fix stoping the scheduler timeout
  drm/amdgpu: cleanup on error in amdgpu_cs_ioctl()
  drm/amdgpu: update Fiji's Golden setting
  drm/amdgpu: update Fiji's rev id
  drm/amdgpu: extract common code in vi_common_early_init
  drm/amd/scheduler: don't oops on failure to load
  drm/amdgpu: don't oops on failure to load (v2)
  drm/amdgpu: don't VT switch on suspend
  drm/amdgpu: Make amdgpu_mn functions inline
  drm/amdgpu: remove amdgpu_fence_ref/unref
  drm/amdgpu: use common fence for sync
  drm/amdgpu: use the new fence_is_later
  drm/amdgpu: use common fences for VMID management v2
  drm/amdgpu: move ring_from_fence to common code
  drm/amdgpu: switch to common fence_wait_any_timeout v2
  drm/amdgpu: remove unneeded fence functions
  ...
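The fencing cleanup above replaces amdgpu's driver-private fence helpers with the common struct fence infrastructure; in the header below, the amdgpu_fence_later()/amdgpu_fence_is_earlier() inlines go away in favour of the generic fence_is_later() seqno comparison. A minimal sketch of what a caller can do after the conversion (pick_later_fence() is a hypothetical helper for illustration, not part of the patch set):

#include <linux/kernel.h>
#include <linux/fence.h>

/*
 * Illustrative sketch only: with everything expressed as a plain
 * struct fence, "which of these two fences signals later?" is
 * answered by the shared fence_is_later() helper, which compares
 * seqnos of fences on the same context, so the driver-private
 * amdgpu_fence_later() removed below is no longer needed.
 */
static struct fence *pick_later_fence(struct fence *a, struct fence *b)
{
	if (!a)
		return b;
	if (!b)
		return a;

	/* comparing seqnos is only meaningful within one context (ring) */
	WARN_ON(a->context != b->context);

	return fence_is_later(a, b) ? a : b;
}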
Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu.h')
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu.h  |  58
1 file changed, 6 insertions(+), 52 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index 5c400f4b87fd..de9312c02185 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -405,7 +405,6 @@ struct amdgpu_fence_driver {
 /* some special values for the owner field */
 #define AMDGPU_FENCE_OWNER_UNDEFINED	((void*)0ul)
 #define AMDGPU_FENCE_OWNER_VM		((void*)1ul)
-#define AMDGPU_FENCE_OWNER_MOVE		((void*)2ul)
 
 #define AMDGPU_FENCE_FLAG_64BIT		(1 << 0)
 #define AMDGPU_FENCE_FLAG_INT		(1 << 1)
@@ -447,57 +446,11 @@ int amdgpu_fence_wait_next(struct amdgpu_ring *ring);
 int amdgpu_fence_wait_empty(struct amdgpu_ring *ring);
 unsigned amdgpu_fence_count_emitted(struct amdgpu_ring *ring);
 
-signed long amdgpu_fence_wait_any(struct fence **array,
-				  uint32_t count,
-				  bool intr,
-				  signed long t);
-struct amdgpu_fence *amdgpu_fence_ref(struct amdgpu_fence *fence);
-void amdgpu_fence_unref(struct amdgpu_fence **fence);
-
 bool amdgpu_fence_need_sync(struct amdgpu_fence *fence,
 			    struct amdgpu_ring *ring);
 void amdgpu_fence_note_sync(struct amdgpu_fence *fence,
 			    struct amdgpu_ring *ring);
 
-static inline struct amdgpu_fence *amdgpu_fence_later(struct amdgpu_fence *a,
-						      struct amdgpu_fence *b)
-{
-	if (!a) {
-		return b;
-	}
-
-	if (!b) {
-		return a;
-	}
-
-	BUG_ON(a->ring != b->ring);
-
-	if (a->seq > b->seq) {
-		return a;
-	} else {
-		return b;
-	}
-}
-
-static inline bool amdgpu_fence_is_earlier(struct amdgpu_fence *a,
-					   struct amdgpu_fence *b)
-{
-	if (!a) {
-		return false;
-	}
-
-	if (!b) {
-		return true;
-	}
-
-	BUG_ON(a->ring != b->ring);
-
-	return a->seq < b->seq;
-}
-
-int amdgpu_user_fence_emit(struct amdgpu_ring *ring, struct amdgpu_user_fence *user,
-			   void *owner, struct amdgpu_fence **fence);
-
 /*
  * TTM.
  */
@@ -708,7 +661,7 @@ void amdgpu_semaphore_free(struct amdgpu_device *adev,
  */
 struct amdgpu_sync {
 	struct amdgpu_semaphore *semaphores[AMDGPU_NUM_SYNCS];
-	struct amdgpu_fence	*sync_to[AMDGPU_MAX_RINGS];
+	struct fence		*sync_to[AMDGPU_MAX_RINGS];
 	DECLARE_HASHTABLE(fences, 4);
 	struct fence		*last_vm_update;
 };
@@ -974,7 +927,7 @@ struct amdgpu_vm_id {
 	/* last flushed PD/PT update */
 	struct fence		*flushed_updates;
 	/* last use of vmid */
-	struct amdgpu_fence	*last_id_use;
+	struct fence		*last_id_use;
 };
 
 struct amdgpu_vm {
@@ -1007,7 +960,7 @@ struct amdgpu_vm {
 };
 
 struct amdgpu_vm_manager {
-	struct amdgpu_fence	*active[AMDGPU_NUM_VM];
+	struct fence		*active[AMDGPU_NUM_VM];
 	uint32_t		max_pfn;
 	/* number of VMIDs */
 	unsigned		nvm;
@@ -1235,6 +1188,7 @@ int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
 		     struct amdgpu_irq_src *irq_src, unsigned irq_type,
 		     enum amdgpu_ring_type ring_type);
 void amdgpu_ring_fini(struct amdgpu_ring *ring);
+struct amdgpu_ring *amdgpu_ring_from_fence(struct fence *f);
 
 /*
  * CS.
@@ -1758,11 +1712,11 @@ void amdgpu_test_syncing(struct amdgpu_device *adev);
 int amdgpu_mn_register(struct amdgpu_bo *bo, unsigned long addr);
 void amdgpu_mn_unregister(struct amdgpu_bo *bo);
 #else
-static int amdgpu_mn_register(struct amdgpu_bo *bo, unsigned long addr)
+static inline int amdgpu_mn_register(struct amdgpu_bo *bo, unsigned long addr)
 {
 	return -ENODEV;
 }
-static void amdgpu_mn_unregister(struct amdgpu_bo *bo) {}
+static inline void amdgpu_mn_unregister(struct amdgpu_bo *bo) {}
 #endif
 
 /*
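The final hunk makes the !CONFIG_MMU_NOTIFIER stubs static inline, the usual idiom for stubs defined in a header: a plain static definition is duplicated into every file that includes amdgpu.h and triggers "defined but not used" warnings wherever the stub is never called. A minimal sketch of the pattern, using hypothetical CONFIG_FOO/foo_register names:

#include <linux/errno.h>

struct foo_dev;	/* hypothetical device type, for the sketch only */

#if defined(CONFIG_FOO)
/* real implementation lives in a .c file */
int foo_register(struct foo_dev *dev);
#else
/* static inline in the header: no unused-function warning, no duplicate code */
static inline int foo_register(struct foo_dev *dev)
{
	return -ENODEV;	/* feature compiled out */
}
#endif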