author	Andres Rodriguez <andresx7@gmail.com>	2017-02-16 00:47:32 -0500
committer	Alex Deucher <alexander.deucher@amd.com>	2017-05-31 16:49:01 -0400
commit	effd924d2f3b9c52d5bd8137c3803e83f719a290 (patch)
tree	2705aaf11786cd79e5f17f4283cc7ff511d9f616 /drivers/gpu/drm/amd/amdgpu/amdgpu.h
parent	ecd910eb1f091dd25f4a737a3bc50c0c8892eac7 (diff)
drm/amdgpu: untie user ring ids from kernel ring ids v6
Add amdgpu_queue_mgr, a mechanism that decouples usermode's ring ids
from the kernel's ring ids.

The queue manager maintains a per-context map of user ring ids to
amdgpu_ring pointers. Once a map is created it is permanent (this is
required to maintain FIFO execution guarantees for a context's ring).

Different queue map policies can be configured for each HW IP.
Currently all HW IPs use the identity mapper, i.e. the kernel ring id
is equal to the user ring id. The purpose of this mechanism is to
distribute the load across multiple queues more effectively for HW IPs
that support multiple rings.

Userspace clients are unable to check whether a specific resource is
in use by a different client. Therefore, it is up to the kernel driver
to make the optimal choice.

v2: remove amdgpu_queue_mapper_funcs
v3: made amdgpu_queue_mgr per context instead of per-fd
v4: add context_put on error paths
v5: rebase and include new IPs UVD_ENC & VCN_*
v6: drop unused amdgpu_ring_is_valid_index (Alex)

Reviewed-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Andres Rodriguez <andresx7@gmail.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
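For illustration only, here is a minimal sketch of the identity policy described above, assuming the mapper caches its result in queue_map under its lock; the helper name example_identity_map and its error handling are hypothetical and not necessarily the code this series adds elsewhere, though the ring arrays referenced on amdgpu_device are the existing ones:

/* Hypothetical identity-mapper sketch: the user ring id selects the kernel
 * ring with the same index for the given HW IP, and the result is cached in
 * the mapper so later submissions keep hitting the same ring (FIFO order).
 * Only a few HW IPs are shown; bounds are checked against queue_map only. */
static int example_identity_map(struct amdgpu_device *adev,
				struct amdgpu_queue_mapper *mapper,
				int ring,
				struct amdgpu_ring **out_ring)
{
	if (ring < 0 || ring >= AMDGPU_MAX_RINGS)
		return -EINVAL;

	switch (mapper->hw_ip) {
	case AMDGPU_HW_IP_GFX:
		*out_ring = &adev->gfx.gfx_ring[ring];
		break;
	case AMDGPU_HW_IP_COMPUTE:
		*out_ring = &adev->gfx.compute_ring[ring];
		break;
	case AMDGPU_HW_IP_DMA:
		*out_ring = &adev->sdma.instance[ring].ring;
		break;
	default:
		*out_ring = NULL;
		return -EINVAL;
	}

	/* remember the choice; the map is permanent for this context */
	mutex_lock(&mapper->lock);
	mapper->queue_map[ring] = *out_ring;
	mutex_unlock(&mapper->lock);

	return 0;
}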
Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu.h')
-rw-r--r--	drivers/gpu/drm/amd/amdgpu/amdgpu.h	27
1 file changed, 24 insertions(+), 3 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index 1135f19c9e9c..aad1d7bf695a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -777,6 +777,29 @@ int amdgpu_job_submit(struct amdgpu_job *job, struct amdgpu_ring *ring,
 		      struct dma_fence **f);
 
 /*
+ * Queue manager
+ */
+struct amdgpu_queue_mapper {
+	int		hw_ip;
+	struct mutex	lock;
+	/* protected by lock */
+	struct amdgpu_ring *queue_map[AMDGPU_MAX_RINGS];
+};
+
+struct amdgpu_queue_mgr {
+	struct amdgpu_queue_mapper mapper[AMDGPU_MAX_IP_NUM];
+};
+
+int amdgpu_queue_mgr_init(struct amdgpu_device *adev,
+			  struct amdgpu_queue_mgr *mgr);
+int amdgpu_queue_mgr_fini(struct amdgpu_device *adev,
+			  struct amdgpu_queue_mgr *mgr);
+int amdgpu_queue_mgr_map(struct amdgpu_device *adev,
+			 struct amdgpu_queue_mgr *mgr,
+			 int hw_ip, int instance, int ring,
+			 struct amdgpu_ring **out_ring);
+
+/*
  * context related structures
  */
 
@@ -789,6 +812,7 @@ struct amdgpu_ctx_ring {
 struct amdgpu_ctx {
 	struct kref		refcount;
 	struct amdgpu_device	*adev;
+	struct amdgpu_queue_mgr	queue_mgr;
 	unsigned		reset_counter;
 	spinlock_t		ring_lock;
 	struct dma_fence	**fences;
@@ -1909,9 +1933,6 @@ bool amdgpu_need_post(struct amdgpu_device *adev);
 void amdgpu_update_display_priority(struct amdgpu_device *adev);
 
 int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data);
-int amdgpu_cs_get_ring(struct amdgpu_device *adev, u32 ip_type,
-		       u32 ip_instance, u32 ring,
-		       struct amdgpu_ring **out_ring);
 void amdgpu_cs_report_moved_bytes(struct amdgpu_device *adev, u64 num_bytes);
 void amdgpu_ttm_placement_from_domain(struct amdgpu_bo *abo, u32 domain);
 bool amdgpu_ttm_bo_is_amdgpu_bo(struct ttm_buffer_object *bo);
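As a usage illustration of the new header declarations: a submission path that previously called the removed amdgpu_cs_get_ring() would now resolve its ring through the per-context queue manager. The wrapper below is a hypothetical sketch (the actual command submission conversion lives in other files of this series); only amdgpu_queue_mgr_map and the queue_mgr member added above are taken from this patch:

/* Hypothetical caller sketch: map the user-visible (ip_type, ip_instance,
 * ring) triple from a submission onto a kernel amdgpu_ring through the
 * context's queue manager instead of the removed amdgpu_cs_get_ring(). */
static int example_resolve_ring(struct amdgpu_device *adev,
				struct amdgpu_ctx *ctx,
				u32 ip_type, u32 ip_instance, u32 user_ring,
				struct amdgpu_ring **out_ring)
{
	/* the queue manager owns the mapping policy (identity for now) */
	return amdgpu_queue_mgr_map(adev, &ctx->queue_mgr,
				    ip_type, ip_instance, user_ring,
				    out_ring);
}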