author		Dave Airlie <airlied@redhat.com>	2016-03-16 18:25:04 -0400
committer	Dave Airlie <airlied@redhat.com>	2016-03-16 18:25:04 -0400
commit		9f443bf53b5699835e0132d62d1e6c99a1eaeee8 (patch)
tree		482b1f57019446cc866a0fc8e87bd4b0b0119775 /drivers/gpu/drm/amd/amdgpu/amdgpu.h
parent		70a09f36d02584fe0025fa14a5cbf276240b2fd4 (diff)
parent		00b7c4ff7d482d287a591f047e0963d494569b46 (diff)
Merge branch 'drm-next-4.6' of git://people.freedesktop.org/~agd5f/linux into drm-next
A few more fixes and cleanups for 4.6:
- DCE code cleanups
- HDP flush/invalidation fixes
- GPUVM fixes
- switch to drm_vblank_[on|off]
- PX fixes
- misc bug fixes

* 'drm-next-4.6' of git://people.freedesktop.org/~agd5f/linux: (50 commits)
  drm/amdgpu: split pipeline sync out of SDMA vm_flush() as well
  drm/amdgpu: Revert "add mutex for ba_va->valids/invalids"
  drm/amdgpu: Revert "add lock for interval tree in vm"
  drm/amdgpu: Revert "add spin lock to protect freed list in vm (v3)"
  drm/amdgpu: reserve the PD during unmap and remove
  drm/amdgpu: Fix two bugs in amdgpu_vm_bo_split_mapping
  drm/radeon: Don't drop DP 2.7 Ghz link setup on some cards.
  MAINTAINERS: update radeon entry to include amdgpu as well
  drm/amdgpu: disable runtime pm on PX laptops without dGPU power control
  drm/radeon: disable runtime pm on PX laptops without dGPU power control
  drm/amd/amdgpu: Fix indentation in do_set_base() (DCEv8)
  drm/amd/amdgpu: make afmt_init cleanup if alloc fails (DCEv8)
  drm/amd/amdgpu: Move config init flag to bottom of sw_init (DCEv8)
  drm/amd/amdgpu: Don't proceed into audio_fini if audio is disabled (DCEv8)
  drm/amd/amdgpu: Fix identation in do_set_base() (DCEv10)
  drm/amd/amdgpu: Make afmt_init cleanup if alloc fails (DCEv10)
  drm/amd/amdgpu: Move initialized flag to bottom of sw_init (DCEv10)
  drm/amd/amdgpu: Don't proceed in audio_fini if disabled (DCEv10)
  drm/amd/amdgpu: Fix indentation in dce_v11_0_crtc_do_set_base()
  drm/amd/amdgpu: Make afmt_init() cleanup if alloc fails (DCEv11)
  ...
Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu.h')
-rw-r--r--	drivers/gpu/drm/amd/amdgpu/amdgpu.h	41
1 file changed, 27 insertions(+), 14 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index d0489722fc7e..a80c8cea7609 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -287,9 +287,11 @@ struct amdgpu_ring_funcs {
 			struct amdgpu_ib *ib);
 	void (*emit_fence)(struct amdgpu_ring *ring, uint64_t addr,
 			   uint64_t seq, unsigned flags);
+	void (*emit_pipeline_sync)(struct amdgpu_ring *ring);
 	void (*emit_vm_flush)(struct amdgpu_ring *ring, unsigned vm_id,
 			      uint64_t pd_addr);
 	void (*emit_hdp_flush)(struct amdgpu_ring *ring);
+	void (*emit_hdp_invalidate)(struct amdgpu_ring *ring);
 	void (*emit_gds_switch)(struct amdgpu_ring *ring, uint32_t vmid,
 				uint32_t gds_base, uint32_t gds_size,
 				uint32_t gws_base, uint32_t gws_size,
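
The two callbacks added above are per-IP hooks that sit next to the existing emit_* functions. As a hedged sketch only (the function names and empty bodies are placeholders, not code from this series), a ring backend would wire them up roughly like this:

/* Hypothetical sketch: where a ring backend plugs in the new hooks.
 * Names and bodies are placeholders, not taken from this commit. */
static void example_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
{
	/* wait for in-flight pipeline work to drain before the VM flush */
}

static void example_ring_emit_hdp_invalidate(struct amdgpu_ring *ring)
{
	/* invalidate the HDP cache after the engine has written memory */
}

static const struct amdgpu_ring_funcs example_ring_funcs = {
	/* ... existing get_rptr/get_wptr/emit_* callbacks ... */
	.emit_pipeline_sync = example_ring_emit_pipeline_sync,
	.emit_hdp_invalidate = example_ring_emit_hdp_invalidate,
};
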
@@ -369,9 +371,6 @@ struct amdgpu_fence {
 	struct amdgpu_ring		*ring;
 	uint64_t			seq;
 
-	/* filp or special value for fence creator */
-	void				*owner;
-
 	wait_queue_t			fence_wake;
 };
 
@@ -392,8 +391,7 @@ int amdgpu_fence_driver_start_ring(struct amdgpu_ring *ring,
 				   unsigned irq_type);
 void amdgpu_fence_driver_suspend(struct amdgpu_device *adev);
 void amdgpu_fence_driver_resume(struct amdgpu_device *adev);
-int amdgpu_fence_emit(struct amdgpu_ring *ring, void *owner,
-		      struct amdgpu_fence **fence);
+int amdgpu_fence_emit(struct amdgpu_ring *ring, struct fence **fence);
 void amdgpu_fence_process(struct amdgpu_ring *ring);
 int amdgpu_fence_wait_next(struct amdgpu_ring *ring);
 int amdgpu_fence_wait_empty(struct amdgpu_ring *ring);
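
With the owner argument dropped, amdgpu_fence_emit() now returns a plain struct fence through its second parameter instead of an amdgpu_fence. A hedged sketch of the new calling convention, assuming the usual amdgpu.h / linux/fence.h context (error handling trimmed, wrapper name hypothetical):

/* Sketch of the updated amdgpu_fence_emit() call; not from this commit. */
static int example_emit_and_release(struct amdgpu_ring *ring)
{
	struct fence *fence;
	int r;

	r = amdgpu_fence_emit(ring, &fence);	/* old form: (ring, owner, &amdgpu_fence) */
	if (r)
		return r;

	/* owner tracking now lives with the caller, not inside the fence */
	fence_put(fence);
	return 0;
}
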
@@ -434,6 +432,8 @@ struct amdgpu_bo_list_entry {
 	struct ttm_validate_buffer	tv;
 	struct amdgpu_bo_va		*bo_va;
 	uint32_t			priority;
+	struct page			**user_pages;
+	int				user_invalidated;
 };
 
 struct amdgpu_bo_va_mapping {
@@ -445,7 +445,6 @@ struct amdgpu_bo_va_mapping {
 
 /* bo virtual addresses in a specific vm */
 struct amdgpu_bo_va {
-	struct mutex			mutex;
 	/* protected by bo being reserved */
 	struct list_head		bo_list;
 	struct fence			*last_pt_update;
@@ -596,6 +595,8 @@ int amdgpu_sync_resv(struct amdgpu_device *adev,
 struct fence *amdgpu_sync_get_fence(struct amdgpu_sync *sync);
 int amdgpu_sync_wait(struct amdgpu_sync *sync);
 void amdgpu_sync_free(struct amdgpu_sync *sync);
+int amdgpu_sync_init(void);
+void amdgpu_sync_fini(void);
 
 /*
  * GART structures, functions & helpers
@@ -726,7 +727,7 @@ struct amdgpu_ib {
 	uint32_t			length_dw;
 	uint64_t			gpu_addr;
 	uint32_t			*ptr;
-	struct amdgpu_fence		*fence;
+	struct fence			*fence;
 	struct amdgpu_user_fence	*user;
 	struct amdgpu_vm		*vm;
 	unsigned			vm_id;
@@ -845,7 +846,6 @@ struct amdgpu_vm_id {
 
 struct amdgpu_vm {
 	/* tree of virtual addresses mapped */
-	spinlock_t		it_lock;
 	struct rb_root		va;
 
 	/* protecting invalidated */
@@ -882,6 +882,13 @@ struct amdgpu_vm_manager_id {
 	struct list_head	list;
 	struct fence		*active;
 	atomic_long_t		owner;
+
+	uint32_t		gds_base;
+	uint32_t		gds_size;
+	uint32_t		gws_base;
+	uint32_t		gws_size;
+	uint32_t		oa_base;
+	uint32_t		oa_size;
 };
 
 struct amdgpu_vm_manager {
@@ -917,8 +924,11 @@ int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
 		      struct amdgpu_sync *sync, struct fence *fence,
 		      unsigned *vm_id, uint64_t *vm_pd_addr);
 void amdgpu_vm_flush(struct amdgpu_ring *ring,
-		     unsigned vmid,
-		     uint64_t pd_addr);
+		     unsigned vm_id, uint64_t pd_addr,
+		     uint32_t gds_base, uint32_t gds_size,
+		     uint32_t gws_base, uint32_t gws_size,
+		     uint32_t oa_base, uint32_t oa_size);
+void amdgpu_vm_reset_id(struct amdgpu_device *adev, unsigned vm_id);
 uint64_t amdgpu_vm_map_gart(const dma_addr_t *pages_addr, uint64_t addr);
 int amdgpu_vm_update_page_directory(struct amdgpu_device *adev,
 				    struct amdgpu_vm *vm);
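
amdgpu_vm_flush() now takes the GDS/GWS/OA partition directly, presumably so it can be compared against the values cached in amdgpu_vm_manager_id above and redundant switches skipped. A hedged sketch of a call site; the zero values and wrapper name are illustrative, not defaults from this series:

/* Hypothetical call with the widened amdgpu_vm_flush() signature. */
static void example_flush_vm(struct amdgpu_ring *ring, unsigned vm_id,
			     uint64_t pd_addr)
{
	amdgpu_vm_flush(ring, vm_id, pd_addr,
			0, 0,	/* gds_base, gds_size */
			0, 0,	/* gws_base, gws_size */
			0, 0);	/* oa_base, oa_size */
}
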
@@ -1006,7 +1016,7 @@ struct amdgpu_bo_list {
 	struct amdgpu_bo *gds_obj;
 	struct amdgpu_bo *gws_obj;
 	struct amdgpu_bo *oa_obj;
-	bool has_userptr;
+	unsigned first_userptr;
 	unsigned num_entries;
 	struct amdgpu_bo_list_entry *array;
 };
@@ -1135,8 +1145,7 @@ int amdgpu_ib_get(struct amdgpu_device *adev, struct amdgpu_vm *vm,
 		  unsigned size, struct amdgpu_ib *ib);
 void amdgpu_ib_free(struct amdgpu_device *adev, struct amdgpu_ib *ib);
 int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
-		       struct amdgpu_ib *ib, void *owner,
-		       struct fence *last_vm_update,
+		       struct amdgpu_ib *ib, struct fence *last_vm_update,
 		       struct fence **f);
 int amdgpu_ib_pool_init(struct amdgpu_device *adev);
 void amdgpu_ib_pool_fini(struct amdgpu_device *adev);
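
amdgpu_ib_schedule() likewise loses its owner argument; the submission fence still comes back through the final parameter. A hedged example of the trimmed call (wrapper name hypothetical):

/* Sketch of the new amdgpu_ib_schedule() signature in use. */
static int example_schedule_one_ib(struct amdgpu_ring *ring,
				   struct amdgpu_ib *ib,
				   struct fence *last_vm_update)
{
	struct fence *f = NULL;
	int r;

	r = amdgpu_ib_schedule(ring, 1, ib, last_vm_update, &f);
	if (r)
		return r;

	fence_put(f);
	return 0;
}
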
@@ -2012,7 +2021,6 @@ struct amdgpu_device {
 	struct amdgpu_sdma		sdma;
 
 	/* uvd */
-	bool				has_uvd;
 	struct amdgpu_uvd		uvd;
 
 	/* vce */
@@ -2186,10 +2194,12 @@ amdgpu_get_sdma_instance(struct amdgpu_ring *ring)
 #define amdgpu_ring_get_wptr(r) (r)->funcs->get_wptr((r))
 #define amdgpu_ring_set_wptr(r) (r)->funcs->set_wptr((r))
 #define amdgpu_ring_emit_ib(r, ib) (r)->funcs->emit_ib((r), (ib))
+#define amdgpu_ring_emit_pipeline_sync(r) (r)->funcs->emit_pipeline_sync((r))
 #define amdgpu_ring_emit_vm_flush(r, vmid, addr) (r)->funcs->emit_vm_flush((r), (vmid), (addr))
 #define amdgpu_ring_emit_fence(r, addr, seq, flags) (r)->funcs->emit_fence((r), (addr), (seq), (flags))
 #define amdgpu_ring_emit_gds_switch(r, v, db, ds, wb, ws, ab, as) (r)->funcs->emit_gds_switch((r), (v), (db), (ds), (wb), (ws), (ab), (as))
 #define amdgpu_ring_emit_hdp_flush(r) (r)->funcs->emit_hdp_flush((r))
+#define amdgpu_ring_emit_hdp_invalidate(r) (r)->funcs->emit_hdp_invalidate((r))
 #define amdgpu_ring_pad_ib(r, ib) ((r)->funcs->pad_ib((r), (ib)))
 #define amdgpu_ih_get_wptr(adev) (adev)->irq.ih_funcs->get_wptr((adev))
 #define amdgpu_ih_decode_iv(adev, iv) (adev)->irq.ih_funcs->decode_iv((adev), (iv))
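
The two new wrappers follow the existing pattern of dispatching through the ring's funcs table. A hedged sketch of how a submission path might chain them around a VM flush; the ordering, NULL check and function name are illustrative, not lifted from this series:

/* Illustrative only: emit a pipeline sync before the VM flush, then an
 * HDP flush/invalidate pair. */
static void example_emit_sequence(struct amdgpu_ring *ring,
				  unsigned vm_id, uint64_t pd_addr)
{
	if (ring->funcs->emit_pipeline_sync)
		amdgpu_ring_emit_pipeline_sync(ring);
	amdgpu_ring_emit_vm_flush(ring, vm_id, pd_addr);
	amdgpu_ring_emit_hdp_flush(ring);
	amdgpu_ring_emit_hdp_invalidate(ring);
}
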
@@ -2314,12 +2324,15 @@ int amdgpu_cs_get_ring(struct amdgpu_device *adev, u32 ip_type,
 			struct amdgpu_ring **out_ring);
 void amdgpu_ttm_placement_from_domain(struct amdgpu_bo *rbo, u32 domain);
 bool amdgpu_ttm_bo_is_amdgpu_bo(struct ttm_buffer_object *bo);
+int amdgpu_ttm_tt_get_user_pages(struct ttm_tt *ttm, struct page **pages);
 int amdgpu_ttm_tt_set_userptr(struct ttm_tt *ttm, uint64_t addr,
 			      uint32_t flags);
 bool amdgpu_ttm_tt_has_userptr(struct ttm_tt *ttm);
 struct mm_struct *amdgpu_ttm_tt_get_usermm(struct ttm_tt *ttm);
 bool amdgpu_ttm_tt_affect_userptr(struct ttm_tt *ttm, unsigned long start,
 				  unsigned long end);
+bool amdgpu_ttm_tt_userptr_invalidated(struct ttm_tt *ttm,
+				       int *last_invalidated);
 bool amdgpu_ttm_tt_is_readonly(struct ttm_tt *ttm);
 uint32_t amdgpu_ttm_tt_pte_flags(struct amdgpu_device *adev, struct ttm_tt *ttm,
 				 struct ttm_mem_reg *mem);
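
The new userptr helpers pair with the user_pages/user_invalidated fields added to amdgpu_bo_list_entry earlier in this diff: the CS path can fetch the backing pages up front and later ask whether the MMU notifier invalidated them. A hedged sketch of how they might be used together (function name hypothetical, allocation of entry->user_pages assumed to happen elsewhere):

/* Hypothetical helper combining the new userptr interfaces. */
static int example_check_userptr(struct ttm_tt *ttm,
				 struct amdgpu_bo_list_entry *entry,
				 int *last_invalidated)
{
	int r;

	if (!amdgpu_ttm_tt_has_userptr(ttm))
		return 0;

	r = amdgpu_ttm_tt_get_user_pages(ttm, entry->user_pages);
	if (r)
		return r;

	/* flag the entry so the submission can be redone if pages moved */
	entry->user_invalidated =
		amdgpu_ttm_tt_userptr_invalidated(ttm, last_invalidated);
	return 0;
}
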