author     Dave Airlie <airlied@redhat.com>  2018-08-07 16:09:08 -0400
committer  Dave Airlie <airlied@redhat.com>  2018-08-07 16:22:23 -0400
commit     940fbcb73fd25b517fa10c5a9cc96ca0ce1a2fc4 (patch)
tree       c967fae5501fefe9258f9891371977833bd2a72c
parent     569f0a8694d0ff13c5d296a594c7d8cec8d6f35f (diff)
parent     df36b2fb8390d98453fff1aae3927095fe9ff36c (diff)
Merge branch 'drm-next-4.19' of git://people.freedesktop.org/~agd5f/linux into drm-next
Fixes for 4.19:
- Fix UVD 7.2 instance handling
- Fix UVD 7.2 harvesting
- GPU scheduler fix for when a process is killed
- TTM cleanups
- amdgpu CS bo_list fixes
- Powerplay fixes for polaris12 and CZ/ST
- DC fixes for link training certain HMDs
- DC fix for vega10 blank screen in certain cases

From: Alex Deucher <alexdeucher@gmail.com>
Signed-off-by: Dave Airlie <airlied@redhat.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180801222906.1016-1-alexander.deucher@amd.com
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu.h | 41
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c | 189
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.h | 85
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | 169
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c | 19
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_object.c | 7
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c | 5
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_queue_mgr.c | 13
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h | 1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h | 5
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 111
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c | 11
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 29
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h | 1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c | 85
-rw-r--r--  drivers/gpu/drm/amd/display/dc/core/dc_link.c | 20
-rw-r--r--  drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c | 8
-rw-r--r--  drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c | 5
-rw-r--r--  drivers/gpu/drm/amd/display/dc/core/dc_surface.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dc.h | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce/dce_aux.c | 55
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce/dce_aux.h | 4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c | 38
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c | 4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.h | 5
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c | 4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c | 4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce120/dce120_hw_sequencer.c | 10
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c | 4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c | 4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c | 4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/core_types.h | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h | 3
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/hw/aux_engine.h | 127
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/hw/dmcu.h | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/hw/engine.h | 106
-rw-r--r--  drivers/gpu/drm/amd/include/atomfirmware.h | 15
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/smu7_powertune.c | 43
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c | 5
-rw-r--r--  drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c | 22
-rw-r--r--  drivers/gpu/drm/radeon/radeon_object.c | 7
-rw-r--r--  drivers/gpu/drm/scheduler/Makefile | 1
-rw-r--r--  drivers/gpu/drm/scheduler/gpu_scheduler.c | 41
-rw-r--r--  drivers/gpu/drm/ttm/ttm_page_alloc.c | 62
-rw-r--r--  drivers/gpu/drm/ttm/ttm_page_alloc_dma.c | 59
-rw-r--r--  drivers/gpu/drm/ttm/ttm_tt.c | 25
-rw-r--r--  include/drm/gpu_scheduler.h | 2
-rw-r--r--  include/drm/ttm/ttm_set_memory.h | 150
50 files changed, 911 insertions(+), 711 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index 0283e2b3c851..447c4c7a36d6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -74,6 +74,7 @@
 #include "amdgpu_gart.h"
 #include "amdgpu_debugfs.h"
 #include "amdgpu_job.h"
+#include "amdgpu_bo_list.h"
 
 /*
  * Modules parameters.
@@ -690,45 +691,6 @@ struct amdgpu_fpriv {
 };
 
 /*
- * residency list
- */
-struct amdgpu_bo_list_entry {
-	struct amdgpu_bo		*robj;
-	struct ttm_validate_buffer	tv;
-	struct amdgpu_bo_va		*bo_va;
-	uint32_t			priority;
-	struct page			**user_pages;
-	int				user_invalidated;
-};
-
-struct amdgpu_bo_list {
-	struct mutex lock;
-	struct rcu_head rhead;
-	struct kref refcount;
-	struct amdgpu_bo *gds_obj;
-	struct amdgpu_bo *gws_obj;
-	struct amdgpu_bo *oa_obj;
-	unsigned first_userptr;
-	unsigned num_entries;
-	struct amdgpu_bo_list_entry *array;
-};
-
-struct amdgpu_bo_list *
-amdgpu_bo_list_get(struct amdgpu_fpriv *fpriv, int id);
-void amdgpu_bo_list_get_list(struct amdgpu_bo_list *list,
-			     struct list_head *validated);
-void amdgpu_bo_list_put(struct amdgpu_bo_list *list);
-void amdgpu_bo_list_free(struct amdgpu_bo_list *list);
-int amdgpu_bo_create_list_entry_array(struct drm_amdgpu_bo_list_in *in,
-				      struct drm_amdgpu_bo_list_entry **info_param);
-
-int amdgpu_bo_list_create(struct amdgpu_device *adev,
-			  struct drm_file *filp,
-			  struct drm_amdgpu_bo_list_entry *info,
-			  unsigned num_entries,
-			  struct amdgpu_bo_list **list);
-
-/*
  * GFX stuff
  */
 #include "clearstate_defs.h"
@@ -1748,6 +1710,7 @@ amdgpu_get_sdma_instance(struct amdgpu_ring *ring)
 #define amdgpu_vm_write_pte(adev, ib, pe, value, count, incr) ((adev)->vm_manager.vm_pte_funcs->write_pte((ib), (pe), (value), (count), (incr)))
 #define amdgpu_vm_set_pte_pde(adev, ib, pe, addr, count, incr, flags) ((adev)->vm_manager.vm_pte_funcs->set_pte_pde((ib), (pe), (addr), (count), (incr), (flags)))
 #define amdgpu_ring_parse_cs(r, p, ib) ((r)->funcs->parse_cs((p), (ib)))
+#define amdgpu_ring_patch_cs_in_place(r, p, ib) ((r)->funcs->patch_cs_in_place((p), (ib)))
 #define amdgpu_ring_test_ring(r) (r)->funcs->test_ring((r))
 #define amdgpu_ring_test_ib(r, t) (r)->funcs->test_ib((r), (t))
 #define amdgpu_ring_get_rptr(r) (r)->funcs->get_rptr((r))
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c
index 7679c068c89a..d472a2c8399f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c
@@ -35,83 +35,53 @@
 #define AMDGPU_BO_LIST_MAX_PRIORITY	32u
 #define AMDGPU_BO_LIST_NUM_BUCKETS	(AMDGPU_BO_LIST_MAX_PRIORITY + 1)
 
-static int amdgpu_bo_list_set(struct amdgpu_device *adev,
-			      struct drm_file *filp,
-			      struct amdgpu_bo_list *list,
-			      struct drm_amdgpu_bo_list_entry *info,
-			      unsigned num_entries);
+static void amdgpu_bo_list_free_rcu(struct rcu_head *rcu)
+{
+	struct amdgpu_bo_list *list = container_of(rcu, struct amdgpu_bo_list,
+						   rhead);
+
+	kvfree(list);
+}
 
-static void amdgpu_bo_list_release_rcu(struct kref *ref)
+static void amdgpu_bo_list_free(struct kref *ref)
 {
-	unsigned i;
 	struct amdgpu_bo_list *list = container_of(ref, struct amdgpu_bo_list,
 						   refcount);
+	struct amdgpu_bo_list_entry *e;
 
-	for (i = 0; i < list->num_entries; ++i)
-		amdgpu_bo_unref(&list->array[i].robj);
+	amdgpu_bo_list_for_each_entry(e, list)
+		amdgpu_bo_unref(&e->robj);
 
-	mutex_destroy(&list->lock);
-	kvfree(list->array);
-	kfree_rcu(list, rhead);
+	call_rcu(&list->rhead, amdgpu_bo_list_free_rcu);
 }
 
-int amdgpu_bo_list_create(struct amdgpu_device *adev,
-			  struct drm_file *filp,
-			  struct drm_amdgpu_bo_list_entry *info,
-			  unsigned num_entries,
-			  struct amdgpu_bo_list **list_out)
+int amdgpu_bo_list_create(struct amdgpu_device *adev, struct drm_file *filp,
+			  struct drm_amdgpu_bo_list_entry *info,
+			  unsigned num_entries, struct amdgpu_bo_list **result)
 {
+	unsigned last_entry = 0, first_userptr = num_entries;
+	struct amdgpu_bo_list_entry *array;
 	struct amdgpu_bo_list *list;
+	uint64_t total_size = 0;
+	size_t size;
+	unsigned i;
 	int r;
 
+	if (num_entries > SIZE_MAX / sizeof(struct amdgpu_bo_list_entry))
+		return -EINVAL;
 
-	list = kzalloc(sizeof(struct amdgpu_bo_list), GFP_KERNEL);
+	size = sizeof(struct amdgpu_bo_list);
+	size += num_entries * sizeof(struct amdgpu_bo_list_entry);
+	list = kvmalloc(size, GFP_KERNEL);
 	if (!list)
 		return -ENOMEM;
 
-	/* initialize bo list*/
-	mutex_init(&list->lock);
 	kref_init(&list->refcount);
-	r = amdgpu_bo_list_set(adev, filp, list, info, num_entries);
-	if (r) {
-		kfree(list);
-		return r;
-	}
-
-	*list_out = list;
-	return 0;
-}
-
-static void amdgpu_bo_list_destroy(struct amdgpu_fpriv *fpriv, int id)
-{
-	struct amdgpu_bo_list *list;
-
-	mutex_lock(&fpriv->bo_list_lock);
-	list = idr_remove(&fpriv->bo_list_handles, id);
-	mutex_unlock(&fpriv->bo_list_lock);
-	if (list)
-		kref_put(&list->refcount, amdgpu_bo_list_release_rcu);
-}
-
-static int amdgpu_bo_list_set(struct amdgpu_device *adev,
-			      struct drm_file *filp,
-			      struct amdgpu_bo_list *list,
-			      struct drm_amdgpu_bo_list_entry *info,
-			      unsigned num_entries)
-{
-	struct amdgpu_bo_list_entry *array;
-	struct amdgpu_bo *gds_obj = adev->gds.gds_gfx_bo;
-	struct amdgpu_bo *gws_obj = adev->gds.gws_gfx_bo;
-	struct amdgpu_bo *oa_obj = adev->gds.oa_gfx_bo;
-
-	unsigned last_entry = 0, first_userptr = num_entries;
-	unsigned i;
-	int r;
-	unsigned long total_size = 0;
-
-	array = kvmalloc_array(num_entries, sizeof(struct amdgpu_bo_list_entry), GFP_KERNEL);
-	if (!array)
-		return -ENOMEM;
+	list->gds_obj = adev->gds.gds_gfx_bo;
+	list->gws_obj = adev->gds.gws_gfx_bo;
+	list->oa_obj = adev->gds.oa_gfx_bo;
+
+	array = amdgpu_bo_list_array_entry(list, 0);
 	memset(array, 0, num_entries * sizeof(struct amdgpu_bo_list_entry));
 
 	for (i = 0; i < num_entries; ++i) {
@@ -148,59 +118,56 @@ static int amdgpu_bo_list_set(struct amdgpu_device *adev,
 		entry->tv.shared = !entry->robj->prime_shared_count;
 
 		if (entry->robj->preferred_domains == AMDGPU_GEM_DOMAIN_GDS)
-			gds_obj = entry->robj;
+			list->gds_obj = entry->robj;
 		if (entry->robj->preferred_domains == AMDGPU_GEM_DOMAIN_GWS)
-			gws_obj = entry->robj;
+			list->gws_obj = entry->robj;
 		if (entry->robj->preferred_domains == AMDGPU_GEM_DOMAIN_OA)
-			oa_obj = entry->robj;
+			list->oa_obj = entry->robj;
 
 		total_size += amdgpu_bo_size(entry->robj);
 		trace_amdgpu_bo_list_set(list, entry->robj);
 	}
 
-	for (i = 0; i < list->num_entries; ++i)
-		amdgpu_bo_unref(&list->array[i].robj);
-
-	kvfree(list->array);
-
-	list->gds_obj = gds_obj;
-	list->gws_obj = gws_obj;
-	list->oa_obj = oa_obj;
 	list->first_userptr = first_userptr;
-	list->array = array;
 	list->num_entries = num_entries;
 
 	trace_amdgpu_cs_bo_status(list->num_entries, total_size);
 
+	*result = list;
 	return 0;
 
 error_free:
 	while (i--)
 		amdgpu_bo_unref(&array[i].robj);
-	kvfree(array);
+	kvfree(list);
 	return r;
+
 }
 
-struct amdgpu_bo_list *
-amdgpu_bo_list_get(struct amdgpu_fpriv *fpriv, int id)
+static void amdgpu_bo_list_destroy(struct amdgpu_fpriv *fpriv, int id)
 {
-	struct amdgpu_bo_list *result;
+	struct amdgpu_bo_list *list;
 
+	mutex_lock(&fpriv->bo_list_lock);
+	list = idr_remove(&fpriv->bo_list_handles, id);
+	mutex_unlock(&fpriv->bo_list_lock);
+	if (list)
+		kref_put(&list->refcount, amdgpu_bo_list_free);
+}
+
+int amdgpu_bo_list_get(struct amdgpu_fpriv *fpriv, int id,
+		       struct amdgpu_bo_list **result)
+{
 	rcu_read_lock();
-	result = idr_find(&fpriv->bo_list_handles, id);
+	*result = idr_find(&fpriv->bo_list_handles, id);
 
-	if (result) {
-		if (kref_get_unless_zero(&result->refcount)) {
-			rcu_read_unlock();
-			mutex_lock(&result->lock);
-		} else {
-			rcu_read_unlock();
-			result = NULL;
-		}
-	} else {
+	if (*result && kref_get_unless_zero(&(*result)->refcount)) {
 		rcu_read_unlock();
+		return 0;
 	}
 
-	return result;
+	rcu_read_unlock();
+	return -ENOENT;
 }
 
 void amdgpu_bo_list_get_list(struct amdgpu_bo_list *list,
@@ -211,6 +178,7 @@ void amdgpu_bo_list_get_list(struct amdgpu_bo_list *list,
 	 * concatenated in descending order.
 	 */
 	struct list_head bucket[AMDGPU_BO_LIST_NUM_BUCKETS];
+	struct amdgpu_bo_list_entry *e;
 	unsigned i;
 
 	for (i = 0; i < AMDGPU_BO_LIST_NUM_BUCKETS; i++)
@@ -221,14 +189,13 @@ void amdgpu_bo_list_get_list(struct amdgpu_bo_list *list,
 	 * in the list, the sort mustn't change the ordering of buffers
 	 * with the same priority, i.e. it must be stable.
 	 */
-	for (i = 0; i < list->num_entries; i++) {
-		unsigned priority = list->array[i].priority;
+	amdgpu_bo_list_for_each_entry(e, list) {
+		unsigned priority = e->priority;
 
-		if (!list->array[i].robj->parent)
-			list_add_tail(&list->array[i].tv.head,
-				      &bucket[priority]);
+		if (!e->robj->parent)
+			list_add_tail(&e->tv.head, &bucket[priority]);
 
-		list->array[i].user_pages = NULL;
+		e->user_pages = NULL;
 	}
 
 	/* Connect the sorted buckets in the output list. */
@@ -238,20 +205,7 @@ void amdgpu_bo_list_get_list(struct amdgpu_bo_list *list,
 
 void amdgpu_bo_list_put(struct amdgpu_bo_list *list)
 {
-	mutex_unlock(&list->lock);
-	kref_put(&list->refcount, amdgpu_bo_list_release_rcu);
-}
-
-void amdgpu_bo_list_free(struct amdgpu_bo_list *list)
-{
-	unsigned i;
-
-	for (i = 0; i < list->num_entries; ++i)
-		amdgpu_bo_unref(&list->array[i].robj);
-
-	mutex_destroy(&list->lock);
-	kvfree(list->array);
-	kfree(list);
+	kref_put(&list->refcount, amdgpu_bo_list_free);
 }
 
 int amdgpu_bo_create_list_entry_array(struct drm_amdgpu_bo_list_in *in,
@@ -304,7 +258,7 @@ int amdgpu_bo_list_ioctl(struct drm_device *dev, void *data,
 	union drm_amdgpu_bo_list *args = data;
 	uint32_t handle = args->in.list_handle;
 	struct drm_amdgpu_bo_list_entry *info = NULL;
-	struct amdgpu_bo_list *list;
+	struct amdgpu_bo_list *list, *old;
 	int r;
 
 	r = amdgpu_bo_create_list_entry_array(&args->in, &info);
@@ -322,7 +276,7 @@ int amdgpu_bo_list_ioctl(struct drm_device *dev, void *data,
 		r = idr_alloc(&fpriv->bo_list_handles, list, 1, 0, GFP_KERNEL);
 		mutex_unlock(&fpriv->bo_list_lock);
 		if (r < 0) {
-			amdgpu_bo_list_free(list);
+			amdgpu_bo_list_put(list);
 			return r;
 		}
 
@@ -335,17 +289,22 @@ int amdgpu_bo_list_ioctl(struct drm_device *dev, void *data,
 		break;
 
 	case AMDGPU_BO_LIST_OP_UPDATE:
-		r = -ENOENT;
-		list = amdgpu_bo_list_get(fpriv, handle);
-		if (!list)
+		r = amdgpu_bo_list_create(adev, filp, info, args->in.bo_number,
+					  &list);
+		if (r)
 			goto error_free;
 
-		r = amdgpu_bo_list_set(adev, filp, list, info,
-				       args->in.bo_number);
-		amdgpu_bo_list_put(list);
-		if (r)
+		mutex_lock(&fpriv->bo_list_lock);
+		old = idr_replace(&fpriv->bo_list_handles, list, handle);
+		mutex_unlock(&fpriv->bo_list_lock);
+
+		if (IS_ERR(old)) {
+			amdgpu_bo_list_put(list);
+			r = PTR_ERR(old);
 			goto error_free;
+		}
 
+		amdgpu_bo_list_put(old);
 		break;
 
 	default:
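The reworked lookup/free pair above is the classic RCU-plus-refcount idiom: readers find the object under rcu_read_lock() and only take a reference if the count has not already dropped to zero, while the final kref_put() defers the actual kvfree() by one RCU grace period. A minimal sketch of the same pattern, using hypothetical my_obj/my_obj_get names that are not part of this patch:

struct my_obj {
	struct rcu_head rhead;
	struct kref refcount;
};

static void my_obj_free_rcu(struct rcu_head *rcu)
{
	kvfree(container_of(rcu, struct my_obj, rhead));
}

static void my_obj_free(struct kref *ref)
{
	/* readers may still hold RCU references; free after a grace period */
	call_rcu(&container_of(ref, struct my_obj, refcount)->rhead,
		 my_obj_free_rcu);
}

struct my_obj *my_obj_get(struct idr *idr, int id)
{
	struct my_obj *obj;

	rcu_read_lock();
	obj = idr_find(idr, id);
	/* a concurrent free may already have dropped the last reference */
	if (obj && !kref_get_unless_zero(&obj->refcount))
		obj = NULL;
	rcu_read_unlock();
	return obj;
}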
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.h
new file mode 100644
index 000000000000..61b089768e1c
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.h
@@ -0,0 +1,85 @@
+/*
+ * Copyright 2018 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef __AMDGPU_BO_LIST_H__
+#define __AMDGPU_BO_LIST_H__
+
+#include <drm/ttm/ttm_execbuf_util.h>
+#include <drm/amdgpu_drm.h>
+
+struct amdgpu_device;
+struct amdgpu_bo;
+struct amdgpu_bo_va;
+struct amdgpu_fpriv;
+
+struct amdgpu_bo_list_entry {
+	struct amdgpu_bo		*robj;
+	struct ttm_validate_buffer	tv;
+	struct amdgpu_bo_va		*bo_va;
+	uint32_t			priority;
+	struct page			**user_pages;
+	int				user_invalidated;
+};
+
+struct amdgpu_bo_list {
+	struct rcu_head rhead;
+	struct kref refcount;
+	struct amdgpu_bo *gds_obj;
+	struct amdgpu_bo *gws_obj;
+	struct amdgpu_bo *oa_obj;
+	unsigned first_userptr;
+	unsigned num_entries;
+};
+
+int amdgpu_bo_list_get(struct amdgpu_fpriv *fpriv, int id,
+		       struct amdgpu_bo_list **result);
+void amdgpu_bo_list_get_list(struct amdgpu_bo_list *list,
+			     struct list_head *validated);
+void amdgpu_bo_list_put(struct amdgpu_bo_list *list);
+int amdgpu_bo_create_list_entry_array(struct drm_amdgpu_bo_list_in *in,
+				      struct drm_amdgpu_bo_list_entry **info_param);
+
+int amdgpu_bo_list_create(struct amdgpu_device *adev,
+			  struct drm_file *filp,
+			  struct drm_amdgpu_bo_list_entry *info,
+			  unsigned num_entries,
+			  struct amdgpu_bo_list **list);
+
+static inline struct amdgpu_bo_list_entry *
+amdgpu_bo_list_array_entry(struct amdgpu_bo_list *list, unsigned index)
+{
+	struct amdgpu_bo_list_entry *array = (void *)&list[1];
+
+	return &array[index];
+}
+
+#define amdgpu_bo_list_for_each_entry(e, list) \
+	for (e = amdgpu_bo_list_array_entry(list, 0); \
+	     e != amdgpu_bo_list_array_entry(list, (list)->num_entries); \
+	     ++e)
+
+#define amdgpu_bo_list_for_each_userptr_entry(e, list) \
+	for (e = amdgpu_bo_list_array_entry(list, (list)->first_userptr); \
+	     e != amdgpu_bo_list_array_entry(list, (list)->num_entries); \
+	     ++e)
+
+#endif
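Note how the entry array is no longer a separate allocation: it lives directly behind struct amdgpu_bo_list in the same kvmalloc() block, which is what the `&list[1]` pointer arithmetic in amdgpu_bo_list_array_entry() relies on. A small usage sketch of the resulting iterators, assuming an already-created list:

	struct amdgpu_bo_list_entry *e;
	uint64_t total = 0;

	/* all entries, in creation order */
	amdgpu_bo_list_for_each_entry(e, list)
		total += amdgpu_bo_size(e->robj);

	/* only the userptr tail of the array */
	amdgpu_bo_list_for_each_userptr_entry(e, list)
		e->user_pages = NULL;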
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index 178d9ce4eba1..502b94fb116a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -561,28 +561,38 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
 				union drm_amdgpu_cs *cs)
 {
 	struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
+	struct amdgpu_vm *vm = &fpriv->vm;
 	struct amdgpu_bo_list_entry *e;
 	struct list_head duplicates;
-	unsigned i, tries = 10;
 	struct amdgpu_bo *gds;
 	struct amdgpu_bo *gws;
 	struct amdgpu_bo *oa;
+	unsigned tries = 10;
 	int r;
 
 	INIT_LIST_HEAD(&p->validated);
 
 	/* p->bo_list could already be assigned if AMDGPU_CHUNK_ID_BO_HANDLES is present */
-	if (!p->bo_list)
-		p->bo_list = amdgpu_bo_list_get(fpriv, cs->in.bo_list_handle);
-	else
-		mutex_lock(&p->bo_list->lock);
+	if (cs->in.bo_list_handle) {
+		if (p->bo_list)
+			return -EINVAL;
 
-	if (p->bo_list) {
-		amdgpu_bo_list_get_list(p->bo_list, &p->validated);
-		if (p->bo_list->first_userptr != p->bo_list->num_entries)
-			p->mn = amdgpu_mn_get(p->adev, AMDGPU_MN_TYPE_GFX);
+		r = amdgpu_bo_list_get(fpriv, cs->in.bo_list_handle,
+				       &p->bo_list);
+		if (r)
+			return r;
+	} else if (!p->bo_list) {
+		/* Create a empty bo_list when no handle is provided */
+		r = amdgpu_bo_list_create(p->adev, p->filp, NULL, 0,
+					  &p->bo_list);
+		if (r)
+			return r;
 	}
 
+	amdgpu_bo_list_get_list(p->bo_list, &p->validated);
+	if (p->bo_list->first_userptr != p->bo_list->num_entries)
+		p->mn = amdgpu_mn_get(p->adev, AMDGPU_MN_TYPE_GFX);
+
 	INIT_LIST_HEAD(&duplicates);
 	amdgpu_vm_get_pd_bo(&fpriv->vm, &p->validated, &p->vm_pd);
 
@@ -591,7 +601,6 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
 
 	while (1) {
 		struct list_head need_pages;
-		unsigned i;
 
 		r = ttm_eu_reserve_buffers(&p->ticket, &p->validated, true,
 					   &duplicates);
@@ -601,17 +610,9 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
 			goto error_free_pages;
 		}
 
-		/* Without a BO list we don't have userptr BOs */
-		if (!p->bo_list)
-			break;
-
 		INIT_LIST_HEAD(&need_pages);
-		for (i = p->bo_list->first_userptr;
-		     i < p->bo_list->num_entries; ++i) {
-			struct amdgpu_bo *bo;
-
-			e = &p->bo_list->array[i];
-			bo = e->robj;
+		amdgpu_bo_list_for_each_userptr_entry(e, p->bo_list) {
+			struct amdgpu_bo *bo = e->robj;
 
 			if (amdgpu_ttm_tt_userptr_invalidated(bo->tbo.ttm,
 				 &e->user_invalidated) && e->user_pages) {
@@ -703,23 +704,12 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
 	amdgpu_cs_report_moved_bytes(p->adev, p->bytes_moved,
 				     p->bytes_moved_vis);
 
-	if (p->bo_list) {
-		struct amdgpu_vm *vm = &fpriv->vm;
-		unsigned i;
-
-		gds = p->bo_list->gds_obj;
-		gws = p->bo_list->gws_obj;
-		oa = p->bo_list->oa_obj;
-		for (i = 0; i < p->bo_list->num_entries; i++) {
-			struct amdgpu_bo *bo = p->bo_list->array[i].robj;
-
-			p->bo_list->array[i].bo_va = amdgpu_vm_bo_find(vm, bo);
-		}
-	} else {
-		gds = p->adev->gds.gds_gfx_bo;
-		gws = p->adev->gds.gws_gfx_bo;
-		oa = p->adev->gds.oa_gfx_bo;
-	}
+	gds = p->bo_list->gds_obj;
+	gws = p->bo_list->gws_obj;
+	oa = p->bo_list->oa_obj;
+
+	amdgpu_bo_list_for_each_entry(e, p->bo_list)
+		e->bo_va = amdgpu_vm_bo_find(vm, e->robj);
 
 	if (gds) {
 		p->job->gds_base = amdgpu_bo_gpu_offset(gds);
@@ -747,18 +737,13 @@ error_validate:
 
 error_free_pages:
 
-	if (p->bo_list) {
-		for (i = p->bo_list->first_userptr;
-		     i < p->bo_list->num_entries; ++i) {
-			e = &p->bo_list->array[i];
-
-			if (!e->user_pages)
-				continue;
+	amdgpu_bo_list_for_each_userptr_entry(e, p->bo_list) {
+		if (!e->user_pages)
+			continue;
 
-			release_pages(e->user_pages,
-				      e->robj->tbo.ttm->num_pages);
-			kvfree(e->user_pages);
-		}
+		release_pages(e->user_pages,
+			      e->robj->tbo.ttm->num_pages);
+		kvfree(e->user_pages);
 	}
 
 	return r;
@@ -820,12 +805,13 @@ static void amdgpu_cs_parser_fini(struct amdgpu_cs_parser *parser, int error,
 
 static int amdgpu_bo_vm_update_pte(struct amdgpu_cs_parser *p)
 {
-	struct amdgpu_device *adev = p->adev;
 	struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
+	struct amdgpu_device *adev = p->adev;
 	struct amdgpu_vm *vm = &fpriv->vm;
+	struct amdgpu_bo_list_entry *e;
 	struct amdgpu_bo_va *bo_va;
 	struct amdgpu_bo *bo;
-	int i, r;
+	int r;
 
 	r = amdgpu_vm_clear_freed(adev, vm, NULL);
 	if (r)
@@ -855,29 +841,26 @@ static int amdgpu_bo_vm_update_pte(struct amdgpu_cs_parser *p)
 		return r;
 	}
 
-	if (p->bo_list) {
-		for (i = 0; i < p->bo_list->num_entries; i++) {
-			struct dma_fence *f;
-
-			/* ignore duplicates */
-			bo = p->bo_list->array[i].robj;
-			if (!bo)
-				continue;
-
-			bo_va = p->bo_list->array[i].bo_va;
-			if (bo_va == NULL)
-				continue;
-
-			r = amdgpu_vm_bo_update(adev, bo_va, false);
-			if (r)
-				return r;
-
-			f = bo_va->last_pt_update;
-			r = amdgpu_sync_fence(adev, &p->job->sync, f, false);
-			if (r)
-				return r;
-		}
+	amdgpu_bo_list_for_each_entry(e, p->bo_list) {
+		struct dma_fence *f;
+
+		/* ignore duplicates */
+		bo = e->robj;
+		if (!bo)
+			continue;
+
+		bo_va = e->bo_va;
+		if (bo_va == NULL)
+			continue;
+
+		r = amdgpu_vm_bo_update(adev, bo_va, false);
+		if (r)
+			return r;
 
+		f = bo_va->last_pt_update;
+		r = amdgpu_sync_fence(adev, &p->job->sync, f, false);
+		if (r)
+			return r;
 	}
 
 	r = amdgpu_vm_handle_moved(adev, vm);
@@ -892,15 +875,14 @@ static int amdgpu_bo_vm_update_pte(struct amdgpu_cs_parser *p)
 	if (r)
 		return r;
 
-	if (amdgpu_vm_debug && p->bo_list) {
+	if (amdgpu_vm_debug) {
 		/* Invalidate all BOs to test for userspace bugs */
-		for (i = 0; i < p->bo_list->num_entries; i++) {
+		amdgpu_bo_list_for_each_entry(e, p->bo_list) {
 			/* ignore duplicates */
-			bo = p->bo_list->array[i].robj;
-			if (!bo)
+			if (!e->robj)
 				continue;
 
-			amdgpu_vm_bo_invalidate(adev, bo, false);
+			amdgpu_vm_bo_invalidate(adev, e->robj, false);
 		}
 	}
 
@@ -916,7 +898,7 @@ static int amdgpu_cs_ib_vm_chunk(struct amdgpu_device *adev,
 	int r;
 
 	/* Only for UVD/VCE VM emulation */
-	if (p->ring->funcs->parse_cs) {
+	if (p->ring->funcs->parse_cs || p->ring->funcs->patch_cs_in_place) {
 		unsigned i, j;
 
 		for (i = 0, j = 0; i < p->nchunks && j < p->job->num_ibs; i++) {
@@ -957,12 +939,20 @@ static int amdgpu_cs_ib_vm_chunk(struct amdgpu_device *adev,
 			offset = m->start * AMDGPU_GPU_PAGE_SIZE;
 			kptr += va_start - offset;
 
-			memcpy(ib->ptr, kptr, chunk_ib->ib_bytes);
-			amdgpu_bo_kunmap(aobj);
-
-			r = amdgpu_ring_parse_cs(ring, p, j);
-			if (r)
-				return r;
+			if (p->ring->funcs->parse_cs) {
+				memcpy(ib->ptr, kptr, chunk_ib->ib_bytes);
+				amdgpu_bo_kunmap(aobj);
+
+				r = amdgpu_ring_parse_cs(ring, p, j);
+				if (r)
+					return r;
+			} else {
+				ib->ptr = (uint32_t *)kptr;
+				r = amdgpu_ring_patch_cs_in_place(ring, p, j);
+				amdgpu_bo_kunmap(aobj);
+				if (r)
+					return r;
+			}
 
 			j++;
 		}
@@ -1207,25 +1197,23 @@ static void amdgpu_cs_post_dependencies(struct amdgpu_cs_parser *p)
 static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
 			    union drm_amdgpu_cs *cs)
 {
+	struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
 	struct amdgpu_ring *ring = p->ring;
 	struct drm_sched_entity *entity = &p->ctx->rings[ring->idx].entity;
 	enum drm_sched_priority priority;
+	struct amdgpu_bo_list_entry *e;
 	struct amdgpu_job *job;
-	unsigned i;
 	uint64_t seq;
 
 	int r;
 
 	amdgpu_mn_lock(p->mn);
-	if (p->bo_list) {
-		for (i = p->bo_list->first_userptr;
-		     i < p->bo_list->num_entries; ++i) {
-			struct amdgpu_bo *bo = p->bo_list->array[i].robj;
-
-			if (amdgpu_ttm_tt_userptr_needs_pages(bo->tbo.ttm)) {
-				amdgpu_mn_unlock(p->mn);
-				return -ERESTARTSYS;
-			}
+	amdgpu_bo_list_for_each_userptr_entry(e, p->bo_list) {
+		struct amdgpu_bo *bo = e->robj;
+
+		if (amdgpu_ttm_tt_userptr_needs_pages(bo->tbo.ttm)) {
+			amdgpu_mn_unlock(p->mn);
+			return -ERESTARTSYS;
 		}
 	}
 
@@ -1259,6 +1247,7 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
 	amdgpu_job_free_resources(job);
 
 	trace_amdgpu_cs_ioctl(job);
+	amdgpu_vm_bo_trace_cs(&fpriv->vm, &p->ticket);
 	priority = job->base.s_priority;
 	drm_sched_entity_push_job(&job->base, entity);
 
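With the new hook, a ring that can validate commands without a shadow copy fills in .patch_cs_in_place instead of .parse_cs, and amdgpu_cs_ib_vm_chunk() hands it the kernel mapping of the BO directly. A hypothetical funcs table wiring the callback (my_uvd_* names are illustrative, not from this patch) might look like:

static const struct amdgpu_ring_funcs my_uvd_ring_vm_funcs = {
	/* rewrite addresses directly in the mapped IB instead of a copy */
	.patch_cs_in_place = my_uvd_patch_cs_in_place,
	/* remaining callbacks omitted for brevity */
};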
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
index c7dce14fd47d..bd98cc5fb97b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
@@ -286,7 +286,7 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
 	struct drm_crtc *crtc;
 	uint32_t ui32 = 0;
 	uint64_t ui64 = 0;
-	int i, found;
+	int i, j, found;
 	int ui32_size = sizeof(ui32);
 
 	if (!info->return_size || !info->return_pointer)
@@ -348,7 +348,11 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
 			break;
 		case AMDGPU_HW_IP_UVD:
 			type = AMD_IP_BLOCK_TYPE_UVD;
-			ring_mask |= adev->uvd.inst[0].ring.ready;
+			for (i = 0; i < adev->uvd.num_uvd_inst; i++) {
+				if (adev->uvd.harvest_config & (1 << i))
+					continue;
+				ring_mask |= adev->uvd.inst[i].ring.ready;
+			}
 			ib_start_alignment = 64;
 			ib_size_alignment = 64;
 			break;
@@ -361,9 +365,12 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
 			break;
 		case AMDGPU_HW_IP_UVD_ENC:
 			type = AMD_IP_BLOCK_TYPE_UVD;
-			for (i = 0; i < adev->uvd.num_enc_rings; i++)
-				ring_mask |=
-					adev->uvd.inst[0].ring_enc[i].ready << i;
+			for (i = 0; i < adev->uvd.num_uvd_inst; i++) {
+				if (adev->uvd.harvest_config & (1 << i))
+					continue;
+				for (j = 0; j < adev->uvd.num_enc_rings; j++)
+					ring_mask |= adev->uvd.inst[i].ring_enc[j].ready << j;
+			}
 			ib_start_alignment = 64;
 			ib_size_alignment = 64;
 			break;
@@ -960,7 +967,7 @@ void amdgpu_driver_postclose_kms(struct drm_device *dev,
 	amdgpu_bo_unref(&pd);
 
 	idr_for_each_entry(&fpriv->bo_list_handles, list, handle)
-		amdgpu_bo_list_free(list);
+		amdgpu_bo_list_put(list);
 
 	idr_destroy(&fpriv->bo_list_handles);
 	mutex_destroy(&fpriv->bo_list_lock);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
index 21bfa2d8039e..b0e14a3d54ef 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
@@ -825,7 +825,7 @@ struct amdgpu_bo *amdgpu_bo_ref(struct amdgpu_bo *bo)
 	if (bo == NULL)
 		return NULL;
 
-	ttm_bo_reference(&bo->tbo);
+	ttm_bo_get(&bo->tbo);
 	return bo;
 }
 
@@ -843,9 +843,8 @@ void amdgpu_bo_unref(struct amdgpu_bo **bo)
 		return;
 
 	tbo = &((*bo)->tbo);
-	ttm_bo_unref(&tbo);
-	if (tbo == NULL)
-		*bo = NULL;
+	ttm_bo_put(tbo);
+	*bo = NULL;
 }
 
 /**
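For context, ttm_bo_get()/ttm_bo_put() are the plain get/put replacements for the older ttm_bo_reference()/ttm_bo_unref() pair. Unlike ttm_bo_unref(), ttm_bo_put() does not clear the caller's pointer through a double pointer, so the wrapper now clears it explicitly. A minimal sketch of the updated unref pattern:

	struct ttm_buffer_object *tbo = &((*bo)->tbo);

	ttm_bo_put(tbo);	/* drops the reference; may free the BO */
	*bo = NULL;		/* caller clears its own pointer by hand */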
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
index 15a1192c1ec5..8f98629fbe59 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
@@ -31,7 +31,7 @@
 #include <linux/power_supply.h>
 #include <linux/hwmon.h>
 #include <linux/hwmon-sysfs.h>
-
+#include <linux/nospec.h>
 
 static int amdgpu_debugfs_pm_init(struct amdgpu_device *adev);
 
@@ -403,6 +403,7 @@ static ssize_t amdgpu_set_pp_force_state(struct device *dev,
 			count = -EINVAL;
 			goto fail;
 		}
+		idx = array_index_nospec(idx, ARRAY_SIZE(data.states));
 
 		amdgpu_dpm_get_pp_num_states(adev, &data);
 		state = data.states[idx];
@@ -1185,7 +1186,7 @@ static ssize_t amdgpu_hwmon_show_vddnb(struct device *dev,
 	int r, size = sizeof(vddnb);
 
 	/* only APUs have vddnb */
-	if (adev->flags & AMD_IS_APU)
+	if (!(adev->flags & AMD_IS_APU))
 		return -EINVAL;
 
 	/* Can't get voltage when the card is off */
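The nospec hunk is the standard Spectre-v1 hardening for a user-controlled array index: even after the bounds check, array_index_nospec() clamps the index so the CPU cannot speculatively read past the array. The general shape, using the kernel's <linux/nospec.h> helper:

	if (idx >= ARRAY_SIZE(data.states))
		return -EINVAL;
	/* clamp the user-controlled index even under speculative execution */
	idx = array_index_nospec(idx, ARRAY_SIZE(data.states));
	state = data.states[idx];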
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_queue_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_queue_mgr.c
index d8357290ad09..a172bba32b45 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_queue_mgr.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_queue_mgr.c
@@ -214,7 +214,7 @@ int amdgpu_queue_mgr_map(struct amdgpu_device *adev,
 			 u32 hw_ip, u32 instance, u32 ring,
 			 struct amdgpu_ring **out_ring)
 {
-	int r, ip_num_rings;
+	int i, r, ip_num_rings = 0;
 	struct amdgpu_queue_mapper *mapper = &mgr->mapper[hw_ip];
 
 	if (!adev || !mgr || !out_ring)
@@ -243,14 +243,21 @@ int amdgpu_queue_mgr_map(struct amdgpu_device *adev,
 		ip_num_rings = adev->sdma.num_instances;
 		break;
 	case AMDGPU_HW_IP_UVD:
-		ip_num_rings = adev->uvd.num_uvd_inst;
+		for (i = 0; i < adev->uvd.num_uvd_inst; i++) {
+			if (!(adev->uvd.harvest_config & (1 << i)))
+				ip_num_rings++;
+		}
 		break;
 	case AMDGPU_HW_IP_VCE:
 		ip_num_rings = adev->vce.num_rings;
 		break;
 	case AMDGPU_HW_IP_UVD_ENC:
+		for (i = 0; i < adev->uvd.num_uvd_inst; i++) {
+			if (!(adev->uvd.harvest_config & (1 << i)))
+				ip_num_rings++;
+		}
 		ip_num_rings =
-			adev->uvd.num_enc_rings * adev->uvd.num_uvd_inst;
+			adev->uvd.num_enc_rings * ip_num_rings;
 		break;
 	case AMDGPU_HW_IP_VCN_DEC:
 		ip_num_rings = 1;
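harvest_config is a per-instance bitmask, so counting usable UVD instances is just a matter of skipping set bits; the UVD_ENC case then multiplies by the number of encode rings per surviving instance. A sketch of the counting idiom the fix relies on:

	unsigned int i, num_inst = 0;

	for (i = 0; i < adev->uvd.num_uvd_inst; i++) {
		if (adev->uvd.harvest_config & (1 << i))
			continue;	/* instance fused off: exposes no rings */
		num_inst++;
	}
	/* UVD_ENC then exposes num_enc_rings rings per usable instance */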
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
index 5018c0b6bf1a..d242b9a51e90 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
@@ -123,6 +123,7 @@ struct amdgpu_ring_funcs {
 	void (*set_wptr)(struct amdgpu_ring *ring);
 	/* validating and patching of IBs */
 	int (*parse_cs)(struct amdgpu_cs_parser *p, uint32_t ib_idx);
+	int (*patch_cs_in_place)(struct amdgpu_cs_parser *p, uint32_t ib_idx);
 	/* constants to calculate how many DW are needed for an emit */
 	unsigned emit_frame_size;
 	unsigned emit_ib_size;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
index 11f262f15200..7206a0025b17 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
@@ -314,6 +314,11 @@ DEFINE_EVENT(amdgpu_vm_mapping, amdgpu_vm_bo_mapping,
 	    TP_ARGS(mapping)
 );
 
+DEFINE_EVENT(amdgpu_vm_mapping, amdgpu_vm_bo_cs,
+	     TP_PROTO(struct amdgpu_bo_va_mapping *mapping),
+	     TP_ARGS(mapping)
+);
+
 TRACE_EVENT(amdgpu_vm_set_ptes,
 	    TP_PROTO(uint64_t pe, uint64_t addr, unsigned count,
 		     uint32_t incr, uint64_t flags),
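DEFINE_EVENT() stamps out a new event from the existing amdgpu_vm_mapping class, and the tracepoint machinery also generates a trace_amdgpu_vm_bo_cs_enabled() static-key helper; that is what lets the new amdgpu_vm_bo_trace_cs() skip the whole interval-tree walk when tracing is off. The usual call shape:

	if (trace_amdgpu_vm_bo_cs_enabled())	/* cheap static-key check */
		trace_amdgpu_vm_bo_cs(mapping);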
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index 8c4358e36c87..fcf421263fd9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -92,11 +92,9 @@ static void amdgpu_ttm_mem_global_release(struct drm_global_reference *ref)
 }
 
 /**
- * amdgpu_ttm_global_init - Initialize global TTM memory reference
- * structures.
+ * amdgpu_ttm_global_init - Initialize global TTM memory reference structures.
  *
- * @adev: AMDGPU device for which the global structures need to be
- * registered.
+ * @adev: AMDGPU device for which the global structures need to be registered.
  *
  * This is called as part of the AMDGPU ttm init from amdgpu_ttm_init()
  * during bring up.
@@ -162,13 +160,12 @@ static int amdgpu_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags)
 }
 
 /**
- * amdgpu_init_mem_type - Initialize a memory manager for a specific
- * type of memory request.
+ * amdgpu_init_mem_type - Initialize a memory manager for a specific type of
+ * memory request.
  *
- * @bdev: The TTM BO device object (contains a reference to
- * amdgpu_device)
- * @type: The type of memory requested
- * @man:
+ * @bdev: The TTM BO device object (contains a reference to amdgpu_device)
+ * @type: The type of memory requested
+ * @man: The memory type manager for each domain
  *
  * This is called by ttm_bo_init_mm() when a buffer object is being
  * initialized.
@@ -292,8 +289,8 @@ static void amdgpu_evict_flags(struct ttm_buffer_object *bo,
 /**
  * amdgpu_verify_access - Verify access for a mmap call
  *
- * @bo:  The buffer object to map
- * @filp:  The file pointer from the process performing the mmap
+ * @bo: The buffer object to map
+ * @filp: The file pointer from the process performing the mmap
  *
  * This is called by ttm_bo_mmap() to verify whether a process
 * has the right to mmap a BO to their process space.
@@ -318,11 +315,10 @@ static int amdgpu_verify_access(struct ttm_buffer_object *bo, struct file *filp)
 /**
  * amdgpu_move_null - Register memory for a buffer object
  *
- * @bo:  The bo to assign the memory to
- * @new_mem:  The memory to be assigned.
+ * @bo: The bo to assign the memory to
+ * @new_mem: The memory to be assigned.
  *
- * Assign the memory from new_mem to the memory of the buffer object
- * bo.
+ * Assign the memory from new_mem to the memory of the buffer object bo.
  */
 static void amdgpu_move_null(struct ttm_buffer_object *bo,
 			     struct ttm_mem_reg *new_mem)
@@ -335,8 +331,12 @@ static void amdgpu_move_null(struct ttm_buffer_object *bo,
 }
 
 /**
- * amdgpu_mm_node_addr - Compute the GPU relative offset of a GTT
- * buffer.
+ * amdgpu_mm_node_addr - Compute the GPU relative offset of a GTT buffer.
+ *
+ * @bo: The bo to assign the memory to.
+ * @mm_node: Memory manager node for drm allocator.
+ * @mem: The region where the bo resides.
+ *
  */
 static uint64_t amdgpu_mm_node_addr(struct ttm_buffer_object *bo,
 				    struct drm_mm_node *mm_node,
@@ -352,10 +352,12 @@ static uint64_t amdgpu_mm_node_addr(struct ttm_buffer_object *bo,
 }
 
 /**
- * amdgpu_find_mm_node - Helper function finds the drm_mm_node
- * corresponding to @offset. It also modifies
- * the offset to be within the drm_mm_node
- * returned
+ * amdgpu_find_mm_node - Helper function finds the drm_mm_node corresponding to
+ * @offset. It also modifies the offset to be within the drm_mm_node returned
+ *
+ * @mem: The region where the bo resides.
+ * @offset: The offset that drm_mm_node is used for finding.
+ *
 */
 static struct drm_mm_node *amdgpu_find_mm_node(struct ttm_mem_reg *mem,
 					       unsigned long *offset)
@@ -497,8 +499,8 @@ error:
 /**
  * amdgpu_move_blit - Copy an entire buffer to another buffer
  *
- * This is a helper called by amdgpu_bo_move() and
- * amdgpu_move_vram_ram() to help move buffers to and from VRAM.
+ * This is a helper called by amdgpu_bo_move() and amdgpu_move_vram_ram() to
+ * help move buffers to and from VRAM.
  */
 static int amdgpu_move_blit(struct ttm_buffer_object *bo,
 			    bool evict, bool no_wait_gpu,
@@ -580,7 +582,7 @@ static int amdgpu_move_vram_ram(struct ttm_buffer_object *bo, bool evict,
 	}
 
 	/* blit VRAM to GTT */
-	r = amdgpu_move_blit(bo, true, ctx->no_wait_gpu, &tmp_mem, old_mem);
+	r = amdgpu_move_blit(bo, evict, ctx->no_wait_gpu, &tmp_mem, old_mem);
 	if (unlikely(r)) {
 		goto out_cleanup;
 	}
@@ -632,7 +634,7 @@ static int amdgpu_move_ram_vram(struct ttm_buffer_object *bo, bool evict,
 	}
 
 	/* copy to VRAM */
-	r = amdgpu_move_blit(bo, true, ctx->no_wait_gpu, new_mem, old_mem);
+	r = amdgpu_move_blit(bo, evict, ctx->no_wait_gpu, new_mem, old_mem);
 	if (unlikely(r)) {
 		goto out_cleanup;
 	}
@@ -794,8 +796,8 @@ struct amdgpu_ttm_tt {
 };
 
 /**
- * amdgpu_ttm_tt_get_user_pages - Pin pages of memory pointed to
- * by a USERPTR pointer to memory
+ * amdgpu_ttm_tt_get_user_pages - Pin pages of memory pointed to by a USERPTR
+ * pointer to memory
 *
 * Called by amdgpu_gem_userptr_ioctl() and amdgpu_cs_parser_bos().
 * This provides a wrapper around the get_user_pages() call to provide
@@ -818,8 +820,10 @@ int amdgpu_ttm_tt_get_user_pages(struct ttm_tt *ttm, struct page **pages)
 	down_read(&mm->mmap_sem);
 
 	if (gtt->userflags & AMDGPU_GEM_USERPTR_ANONONLY) {
-		/* check that we only use anonymous memory
-		   to prevent problems with writeback */
+		/*
+		 * check that we only use anonymous memory to prevent problems
+		 * with writeback
+		 */
 		unsigned long end = gtt->userptr + ttm->num_pages * PAGE_SIZE;
 		struct vm_area_struct *vma;
 
@@ -870,10 +874,9 @@ release_pages:
 }
 
 /**
- * amdgpu_ttm_tt_set_user_pages - Copy pages in, putting old pages
- * as necessary.
+ * amdgpu_ttm_tt_set_user_pages - Copy pages in, putting old pages as necessary.
  *
 * Called by amdgpu_cs_list_validate(). This creates the page list
 * that backs user memory and will ultimately be mapped into the device
 * address space.
 */
@@ -915,8 +918,7 @@ void amdgpu_ttm_tt_mark_user_pages(struct ttm_tt *ttm)
 }
 
 /**
- * amdgpu_ttm_tt_pin_userptr - prepare the sg table with the
- * user pages
+ * amdgpu_ttm_tt_pin_userptr - prepare the sg table with the user pages
  *
 * Called by amdgpu_ttm_backend_bind()
 **/
@@ -1295,8 +1297,8 @@ static void amdgpu_ttm_tt_unpopulate(struct ttm_tt *ttm)
 }
 
 /**
- * amdgpu_ttm_tt_set_userptr - Initialize userptr GTT ttm_tt
- * for the current task
+ * amdgpu_ttm_tt_set_userptr - Initialize userptr GTT ttm_tt for the current
+ * task
  *
 * @ttm: The ttm_tt object to bind this userptr object to
 * @addr: The address in the current tasks VM space to use
@@ -1346,9 +1348,8 @@ struct mm_struct *amdgpu_ttm_tt_get_usermm(struct ttm_tt *ttm)
 }
 
 /**
- * amdgpu_ttm_tt_affect_userptr - Determine if a ttm_tt object lays
- * inside an address range for the
- * current task.
+ * amdgpu_ttm_tt_affect_userptr - Determine if a ttm_tt object lays inside an
+ * address range for the current task.
  *
 */
 bool amdgpu_ttm_tt_affect_userptr(struct ttm_tt *ttm, unsigned long start,
@@ -1386,8 +1387,7 @@ bool amdgpu_ttm_tt_affect_userptr(struct ttm_tt *ttm, unsigned long start,
 }
 
 /**
- * amdgpu_ttm_tt_userptr_invalidated - Has the ttm_tt object been
- * invalidated?
+ * amdgpu_ttm_tt_userptr_invalidated - Has the ttm_tt object been invalidated?
 */
 bool amdgpu_ttm_tt_userptr_invalidated(struct ttm_tt *ttm,
 				       int *last_invalidated)
@@ -1400,10 +1400,8 @@ bool amdgpu_ttm_tt_userptr_invalidated(struct ttm_tt *ttm,
 }
 
 /**
- * amdgpu_ttm_tt_userptr_needs_pages - Have the pages backing this
- * ttm_tt object been invalidated
- * since the last time they've
- * been set?
+ * amdgpu_ttm_tt_userptr_needs_pages - Have the pages backing this ttm_tt object
+ * been invalidated since the last time they've been set?
 */
 bool amdgpu_ttm_tt_userptr_needs_pages(struct ttm_tt *ttm)
 {
@@ -1459,13 +1457,12 @@ uint64_t amdgpu_ttm_tt_pte_flags(struct amdgpu_device *adev, struct ttm_tt *ttm,
 }
 
 /**
- * amdgpu_ttm_bo_eviction_valuable - Check to see if we can evict
- * a buffer object.
+ * amdgpu_ttm_bo_eviction_valuable - Check to see if we can evict a buffer
+ * object.
  *
- * Return true if eviction is sensible. Called by
- * ttm_mem_evict_first() on behalf of ttm_bo_mem_force_space()
- * which tries to evict buffer objects until it can find space
- * for a new object and by ttm_bo_force_list_clean() which is
+ * Return true if eviction is sensible. Called by ttm_mem_evict_first() on
+ * behalf of ttm_bo_mem_force_space() which tries to evict buffer objects until
+ * it can find space for a new object and by ttm_bo_force_list_clean() which is
  * used to clean out a memory space.
 */
 static bool amdgpu_ttm_bo_eviction_valuable(struct ttm_buffer_object *bo,
@@ -1515,8 +1512,7 @@ static bool amdgpu_ttm_bo_eviction_valuable(struct ttm_buffer_object *bo,
 }
 
 /**
- * amdgpu_ttm_access_memory - Read or Write memory that backs a
- * buffer object.
+ * amdgpu_ttm_access_memory - Read or Write memory that backs a buffer object.
  *
 * @bo: The buffer object to read/write
 * @offset: Offset into buffer object
@@ -1704,8 +1700,8 @@ error_create:
 	return r;
 }
 /**
- * amdgpu_ttm_init - Init the memory management (ttm) as well as
- * various gtt/vram related fields.
+ * amdgpu_ttm_init - Init the memory management (ttm) as well as various
+ * gtt/vram related fields.
  *
 * This initializes all of the memory space pools that the TTM layer
 * will need such as the GTT space (system memory mapped to the device),
@@ -1856,8 +1852,7 @@ int amdgpu_ttm_init(struct amdgpu_device *adev)
 }
 
 /**
- * amdgpu_ttm_late_init - Handle any late initialization for
- * amdgpu_ttm
+ * amdgpu_ttm_late_init - Handle any late initialization for amdgpu_ttm
 */
 void amdgpu_ttm_late_init(struct amdgpu_device *adev)
 {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
index fca86d71fafc..632fa5980ff4 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
@@ -255,7 +255,8 @@ int amdgpu_uvd_sw_init(struct amdgpu_device *adev)
 	bo_size += AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8);
 
 	for (j = 0; j < adev->uvd.num_uvd_inst; j++) {
-
+		if (adev->uvd.harvest_config & (1 << j))
+			continue;
 		r = amdgpu_bo_create_kernel(adev, bo_size, PAGE_SIZE,
 					    AMDGPU_GEM_DOMAIN_VRAM, &adev->uvd.inst[j].vcpu_bo,
 					    &adev->uvd.inst[j].gpu_addr, &adev->uvd.inst[j].cpu_addr);
@@ -308,6 +309,8 @@ int amdgpu_uvd_sw_fini(struct amdgpu_device *adev)
 	drm_sched_entity_destroy(&adev->uvd.entity);
 
 	for (j = 0; j < adev->uvd.num_uvd_inst; ++j) {
+		if (adev->uvd.harvest_config & (1 << j))
+			continue;
 		kfree(adev->uvd.inst[j].saved_bo);
 
 		amdgpu_bo_free_kernel(&adev->uvd.inst[j].vcpu_bo,
@@ -343,6 +346,8 @@ int amdgpu_uvd_suspend(struct amdgpu_device *adev)
 	}
 
 	for (j = 0; j < adev->uvd.num_uvd_inst; ++j) {
+		if (adev->uvd.harvest_config & (1 << j))
+			continue;
 		if (adev->uvd.inst[j].vcpu_bo == NULL)
 			continue;
 
@@ -365,6 +370,8 @@ int amdgpu_uvd_resume(struct amdgpu_device *adev)
 	int i;
 
 	for (i = 0; i < adev->uvd.num_uvd_inst; i++) {
+		if (adev->uvd.harvest_config & (1 << i))
+			continue;
 		if (adev->uvd.inst[i].vcpu_bo == NULL)
 			return -EINVAL;
 
@@ -1159,6 +1166,8 @@ static void amdgpu_uvd_idle_work_handler(struct work_struct *work)
 	unsigned fences = 0, i, j;
 
 	for (i = 0; i < adev->uvd.num_uvd_inst; ++i) {
+		if (adev->uvd.harvest_config & (1 << i))
+			continue;
 		fences += amdgpu_fence_count_emitted(&adev->uvd.inst[i].ring);
 		for (j = 0; j < adev->uvd.num_enc_rings; ++j) {
 			fences += amdgpu_fence_count_emitted(&adev->uvd.inst[i].ring_enc[j]);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h
index 66872286ab12..33c5f806f925 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h
@@ -48,6 +48,9 @@ struct amdgpu_uvd_inst {
 	uint32_t srbm_soft_reset;
 };
 
+#define AMDGPU_UVD_HARVEST_UVD0 (1 << 0)
+#define AMDGPU_UVD_HARVEST_UVD1 (1 << 1)
+
 struct amdgpu_uvd {
 	const struct firmware	*fw;	/* UVD firmware */
 	unsigned		fw_version;
@@ -61,6 +64,7 @@ struct amdgpu_uvd {
 	atomic_t		handles[AMDGPU_MAX_UVD_HANDLES];
 	struct drm_sched_entity	entity;
 	struct delayed_work	idle_work;
+	unsigned		harvest_config;
 };
 
 int amdgpu_uvd_sw_init(struct amdgpu_device *adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index 9eedc9810004..ece0ac703e27 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -2345,6 +2345,35 @@ struct amdgpu_bo_va_mapping *amdgpu_vm_bo_lookup_mapping(struct amdgpu_vm *vm,
 }
 
 /**
+ * amdgpu_vm_bo_trace_cs - trace all reserved mappings
+ *
+ * @vm: the requested vm
+ * @ticket: CS ticket
+ *
+ * Trace all mappings of BOs reserved during a command submission.
+ */
+void amdgpu_vm_bo_trace_cs(struct amdgpu_vm *vm, struct ww_acquire_ctx *ticket)
+{
+	struct amdgpu_bo_va_mapping *mapping;
+
+	if (!trace_amdgpu_vm_bo_cs_enabled())
+		return;
+
+	for (mapping = amdgpu_vm_it_iter_first(&vm->va, 0, U64_MAX); mapping;
+	     mapping = amdgpu_vm_it_iter_next(mapping, 0, U64_MAX)) {
+		if (mapping->bo_va && mapping->bo_va->base.bo) {
+			struct amdgpu_bo *bo;
+
+			bo = mapping->bo_va->base.bo;
+			if (READ_ONCE(bo->tbo.resv->lock.ctx) != ticket)
+				continue;
+		}
+
+		trace_amdgpu_vm_bo_cs(mapping);
+	}
+}
+
+/**
  * amdgpu_vm_bo_rmv - remove a bo to a specific vm
  *
  * @adev: amdgpu_device pointer
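A usage sketch for the new helper, assuming a command-submission path that has already reserved its buffer list under a ww_acquire ticket (the actual caller is not part of this diff): mappings whose reservation is held under a different ticket are skipped, which the following standalone model reproduces.

    #include <stdio.h>

    /* hypothetical model of the ticket filter in amdgpu_vm_bo_trace_cs() */
    struct mapping { const void *lock_ctx; const char *name; };

    static void trace_cs(const struct mapping *m, int n, const void *ticket)
    {
        for (int i = 0; i < n; i++) {
            if (m[i].lock_ctx && m[i].lock_ctx != ticket)
                continue;               /* reserved by someone else: skip */
            printf("trace %s\n", m[i].name);
        }
    }

    int main(void)
    {
        int ours;                       /* stands in for the ww_acquire_ctx */
        struct mapping maps[] = {
            { &ours, "bo0" }, { (void *)1, "bo1" }, { 0, "bo2" },
        };

        trace_cs(maps, 3, &ours);       /* prints bo0 and bo2 */
        return 0;
    }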
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
index d416f895233d..67a15d439ac0 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
@@ -318,6 +318,7 @@ int amdgpu_vm_bo_clear_mappings(struct amdgpu_device *adev,
 			uint64_t saddr, uint64_t size);
 struct amdgpu_bo_va_mapping *amdgpu_vm_bo_lookup_mapping(struct amdgpu_vm *vm,
 							 uint64_t addr);
+void amdgpu_vm_bo_trace_cs(struct amdgpu_vm *vm, struct ww_acquire_ctx *ticket);
 void amdgpu_vm_bo_rmv(struct amdgpu_device *adev,
 		      struct amdgpu_bo_va *bo_va);
 void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint32_t vm_size,
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
index db5f3d78ab12..5fab3560a71d 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
@@ -41,6 +41,12 @@
 #include "mmhub/mmhub_1_0_sh_mask.h"
 #include "ivsrcid/uvd/irqsrcs_uvd_7_0.h"
 
+#define mmUVD_PG0_CC_UVD_HARVESTING				0x00c7
+#define mmUVD_PG0_CC_UVD_HARVESTING_BASE_IDX			1
+//UVD_PG0_CC_UVD_HARVESTING
+#define UVD_PG0_CC_UVD_HARVESTING__UVD_DISABLE__SHIFT		0x1
+#define UVD_PG0_CC_UVD_HARVESTING__UVD_DISABLE_MASK		0x00000002L
+
 #define UVD7_MAX_HW_INSTANCES_VEGA20			2
 
 static void uvd_v7_0_set_ring_funcs(struct amdgpu_device *adev);
@@ -370,10 +376,25 @@ error:
 static int uvd_v7_0_early_init(void *handle)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-	if (adev->asic_type == CHIP_VEGA20)
+
+	if (adev->asic_type == CHIP_VEGA20) {
+		u32 harvest;
+		int i;
+
 		adev->uvd.num_uvd_inst = UVD7_MAX_HW_INSTANCES_VEGA20;
-	else
+		for (i = 0; i < adev->uvd.num_uvd_inst; i++) {
+			harvest = RREG32_SOC15(UVD, i, mmUVD_PG0_CC_UVD_HARVESTING);
+			if (harvest & UVD_PG0_CC_UVD_HARVESTING__UVD_DISABLE_MASK) {
+				adev->uvd.harvest_config |= 1 << i;
+			}
+		}
+		if (adev->uvd.harvest_config == (AMDGPU_UVD_HARVEST_UVD0 |
+						 AMDGPU_UVD_HARVEST_UVD1))
+			/* both instances are harvested, disable the block */
+			return -ENOENT;
+	} else {
 		adev->uvd.num_uvd_inst = 1;
+	}
 
 	if (amdgpu_sriov_vf(adev))
 		adev->uvd.num_enc_rings = 1;
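The early-init flow reads one harvest fuse per instance, folds the results into a bitmask, and bails out with -ENOENT when every instance is gone. The same decision logic as a self-contained sketch (the fuse values are invented):

    #include <errno.h>
    #include <stdio.h>

    #define UVD_DISABLE_MASK 0x00000002L
    #define HARVEST_UVD0 (1 << 0)
    #define HARVEST_UVD1 (1 << 1)

    /* hypothetical fuse readbacks for the two vega20 instances */
    static unsigned long fuse[2] = { 0x0, UVD_DISABLE_MASK };

    int main(void)
    {
        unsigned harvest_config = 0;

        for (int i = 0; i < 2; i++)
            if (fuse[i] & UVD_DISABLE_MASK)
                harvest_config |= 1 << i;

        if (harvest_config == (HARVEST_UVD0 | HARVEST_UVD1)) {
            fprintf(stderr, "both instances harvested, disable the block\n");
            return ENOENT;
        }
        printf("harvest_config = 0x%x\n", harvest_config);
        return 0;
    }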
@@ -393,6 +414,8 @@ static int uvd_v7_0_sw_init(void *handle)
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
 	for (j = 0; j < adev->uvd.num_uvd_inst; j++) {
+		if (adev->uvd.harvest_config & (1 << j))
+			continue;
 		/* UVD TRAP */
 		r = amdgpu_irq_add_id(adev, amdgpu_ih_clientid_uvds[j], UVD_7_0__SRCID__UVD_SYSTEM_MESSAGE_INTERRUPT, &adev->uvd.inst[j].irq);
 		if (r)
@@ -425,6 +448,8 @@ static int uvd_v7_0_sw_init(void *handle)
 		return r;
 
 	for (j = 0; j < adev->uvd.num_uvd_inst; j++) {
+		if (adev->uvd.harvest_config & (1 << j))
+			continue;
 		if (!amdgpu_sriov_vf(adev)) {
 			ring = &adev->uvd.inst[j].ring;
 			sprintf(ring->name, "uvd<%d>", j);
@@ -472,6 +497,8 @@ static int uvd_v7_0_sw_fini(void *handle)
 		return r;
 
 	for (j = 0; j < adev->uvd.num_uvd_inst; ++j) {
+		if (adev->uvd.harvest_config & (1 << j))
+			continue;
 		for (i = 0; i < adev->uvd.num_enc_rings; ++i)
 			amdgpu_ring_fini(&adev->uvd.inst[j].ring_enc[i]);
 	}
@@ -500,6 +527,8 @@ static int uvd_v7_0_hw_init(void *handle)
 		goto done;
 
 	for (j = 0; j < adev->uvd.num_uvd_inst; ++j) {
+		if (adev->uvd.harvest_config & (1 << j))
+			continue;
 		ring = &adev->uvd.inst[j].ring;
 
 		if (!amdgpu_sriov_vf(adev)) {
@@ -579,8 +608,11 @@ static int uvd_v7_0_hw_fini(void *handle)
 		DRM_DEBUG("For SRIOV client, shouldn't do anything.\n");
 	}
 
-	for (i = 0; i < adev->uvd.num_uvd_inst; ++i)
+	for (i = 0; i < adev->uvd.num_uvd_inst; ++i) {
+		if (adev->uvd.harvest_config & (1 << i))
+			continue;
 		adev->uvd.inst[i].ring.ready = false;
+	}
 
 	return 0;
 }
@@ -623,6 +655,8 @@ static void uvd_v7_0_mc_resume(struct amdgpu_device *adev)
 	int i;
 
 	for (i = 0; i < adev->uvd.num_uvd_inst; ++i) {
+		if (adev->uvd.harvest_config & (1 << i))
+			continue;
 		if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
 			WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
 				lower_32_bits(adev->firmware.ucode[AMDGPU_UCODE_ID_UVD].mc_addr));
@@ -695,6 +729,8 @@ static int uvd_v7_0_mmsch_start(struct amdgpu_device *adev,
 	WREG32_SOC15(VCE, 0, mmVCE_MMSCH_VF_MAILBOX_RESP, 0);
 
 	for (i = 0; i < adev->uvd.num_uvd_inst; ++i) {
+		if (adev->uvd.harvest_config & (1 << i))
+			continue;
 		WDOORBELL32(adev->uvd.inst[i].ring_enc[0].doorbell_index, 0);
 		adev->wb.wb[adev->uvd.inst[i].ring_enc[0].wptr_offs] = 0;
 		adev->uvd.inst[i].ring_enc[0].wptr = 0;
@@ -751,6 +787,8 @@ static int uvd_v7_0_sriov_start(struct amdgpu_device *adev)
 	init_table += header->uvd_table_offset;
 
 	for (i = 0; i < adev->uvd.num_uvd_inst; ++i) {
+		if (adev->uvd.harvest_config & (1 << i))
+			continue;
 		ring = &adev->uvd.inst[i].ring;
 		ring->wptr = 0;
 		size = AMDGPU_GPU_PAGE_ALIGN(adev->uvd.fw->size + 4);
@@ -890,6 +928,8 @@ static int uvd_v7_0_start(struct amdgpu_device *adev)
 	int i, j, k, r;
 
 	for (k = 0; k < adev->uvd.num_uvd_inst; ++k) {
+		if (adev->uvd.harvest_config & (1 << k))
+			continue;
 		/* disable DPG */
 		WREG32_P(SOC15_REG_OFFSET(UVD, k, mmUVD_POWER_STATUS), 0,
 				~UVD_POWER_STATUS__UVD_PG_MODE_MASK);
@@ -902,6 +942,8 @@ static int uvd_v7_0_start(struct amdgpu_device *adev)
 	uvd_v7_0_mc_resume(adev);
 
 	for (k = 0; k < adev->uvd.num_uvd_inst; ++k) {
+		if (adev->uvd.harvest_config & (1 << k))
+			continue;
 		ring = &adev->uvd.inst[k].ring;
 		/* disable clock gating */
 		WREG32_P(SOC15_REG_OFFSET(UVD, k, mmUVD_CGC_CTRL), 0,
@@ -1069,6 +1111,8 @@ static void uvd_v7_0_stop(struct amdgpu_device *adev)
 	uint8_t i = 0;
 
 	for (i = 0; i < adev->uvd.num_uvd_inst; ++i) {
+		if (adev->uvd.harvest_config & (1 << i))
+			continue;
 		/* force RBC into idle state */
 		WREG32_SOC15(UVD, i, mmUVD_RBC_RB_CNTL, 0x11010101);
 
@@ -1206,6 +1250,34 @@ static int uvd_v7_0_ring_test_ring(struct amdgpu_ring *ring)
 }
 
 /**
+ * uvd_v7_0_ring_patch_cs_in_place - Patch the IB for command submission.
+ *
+ * @p: the CS parser with the IBs
+ * @ib_idx: which IB to patch
+ *
+ */
+static int uvd_v7_0_ring_patch_cs_in_place(struct amdgpu_cs_parser *p,
+					   uint32_t ib_idx)
+{
+	struct amdgpu_ib *ib = &p->job->ibs[ib_idx];
+	unsigned i;
+
+	/* No patching necessary for the first instance */
+	if (!p->ring->me)
+		return 0;
+
+	for (i = 0; i < ib->length_dw; i += 2) {
+		uint32_t reg = amdgpu_get_ib_value(p, ib_idx, i);
+
+		reg -= p->adev->reg_offset[UVD_HWIP][0][1];
+		reg += p->adev->reg_offset[UVD_HWIP][1][1];
+
+		amdgpu_set_ib_value(p, ib_idx, i, reg);
+	}
+	return 0;
+}
+
+/**
  * uvd_v7_0_ring_emit_ib - execute indirect buffer
  *
  * @ring: amdgpu_ring pointer
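The patch routine treats the IB as a stream of (register, value) pairs, hence the stride of 2, and rebases every instance-0 register offset onto instance 1 with plain reg = reg - base[0] + base[1]. A standalone sketch of that arithmetic (the aperture bases are made up):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        /* hypothetical per-instance register apertures */
        const uint32_t base0 = 0x7800, base1 = 0x7e00;
        /* IB laid out as (reg, value) pairs, hence i += 2 */
        uint32_t ib[] = { 0x7810, 0x1, 0x7844, 0x2 };

        for (unsigned i = 0; i < sizeof(ib) / sizeof(ib[0]); i += 2) {
            ib[i] = ib[i] - base0 + base1;  /* rebase onto instance 1 */
            printf("reg 0x%04x = 0x%x\n", ib[i], ib[i + 1]);
        }
        return 0;
    }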
@@ -1697,6 +1769,7 @@ static const struct amdgpu_ring_funcs uvd_v7_0_ring_vm_funcs = {
 	.get_rptr = uvd_v7_0_ring_get_rptr,
 	.get_wptr = uvd_v7_0_ring_get_wptr,
 	.set_wptr = uvd_v7_0_ring_set_wptr,
+	.patch_cs_in_place = uvd_v7_0_ring_patch_cs_in_place,
 	.emit_frame_size =
 		6 + /* hdp invalidate */
 		SOC15_FLUSH_GPU_TLB_NUM_WREG * 6 +
@@ -1756,6 +1829,8 @@ static void uvd_v7_0_set_ring_funcs(struct amdgpu_device *adev)
 	int i;
 
 	for (i = 0; i < adev->uvd.num_uvd_inst; i++) {
+		if (adev->uvd.harvest_config & (1 << i))
+			continue;
 		adev->uvd.inst[i].ring.funcs = &uvd_v7_0_ring_vm_funcs;
 		adev->uvd.inst[i].ring.me = i;
 		DRM_INFO("UVD(%d) is enabled in VM mode\n", i);
@@ -1767,6 +1842,8 @@ static void uvd_v7_0_set_enc_ring_funcs(struct amdgpu_device *adev)
 	int i, j;
 
 	for (j = 0; j < adev->uvd.num_uvd_inst; j++) {
+		if (adev->uvd.harvest_config & (1 << j))
+			continue;
 		for (i = 0; i < adev->uvd.num_enc_rings; ++i) {
 			adev->uvd.inst[j].ring_enc[i].funcs = &uvd_v7_0_enc_ring_vm_funcs;
 			adev->uvd.inst[j].ring_enc[i].me = j;
@@ -1786,6 +1863,8 @@ static void uvd_v7_0_set_irq_funcs(struct amdgpu_device *adev)
 	int i;
 
 	for (i = 0; i < adev->uvd.num_uvd_inst; i++) {
+		if (adev->uvd.harvest_config & (1 << i))
+			continue;
 		adev->uvd.inst[i].irq.num_types = adev->uvd.num_enc_rings + 1;
 		adev->uvd.inst[i].irq.funcs = &uvd_v7_0_irq_funcs;
 	}
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
index 388a0635c38d..966d2f9c8c99 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
@@ -60,7 +60,14 @@
 
 enum {
 	LINK_RATE_REF_FREQ_IN_MHZ = 27,
-	PEAK_FACTOR_X1000 = 1006
+	PEAK_FACTOR_X1000 = 1006,
+	/*
+	 * Some receivers fail to train on first try and are good
+	 * on subsequent tries. 2 retries should be plenty. If we
+	 * don't have a successful training then we don't expect to
+	 * ever get one.
+	 */
+	LINK_TRAINING_MAX_VERIFY_RETRY = 2
 };
 
 /*******************************************************************************
@@ -760,7 +767,16 @@ bool dc_link_detect(struct dc_link *link, enum dc_detect_reason reason)
 		 */
 
 		/* deal with non-mst cases */
-		dp_verify_link_cap(link, &link->reported_link_cap);
+		for (i = 0; i < LINK_TRAINING_MAX_VERIFY_RETRY; i++) {
+			int fail_count = 0;
+
+			dp_verify_link_cap(link,
+					   &link->reported_link_cap,
+					   &fail_count);
+
+			if (fail_count == 0)
+				break;
+		}
 	}
 
 	/* HDMI-DVI Dongle */
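The detect path now retries capability verification a bounded number of times, resetting fail_count before each attempt and stopping on the first attempt with no training failures. The retry shape in isolation (verify() is a hypothetical stand-in for dp_verify_link_cap()):

    #include <stdio.h>

    #define MAX_VERIFY_RETRY 2

    /* hypothetical verifier: pretend the first attempt has one failure */
    static void verify(int attempt, int *fail_count)
    {
        if (attempt == 0)
            (*fail_count)++;
    }

    int main(void)
    {
        for (int i = 0; i < MAX_VERIFY_RETRY; i++) {
            int fail_count = 0;         /* reset per attempt, as above */

            verify(i, &fail_count);
            if (fail_count == 0) {
                printf("link verified on attempt %d\n", i + 1);
                break;
            }
        }
        return 0;
    }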
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c
index 4019fe07d291..8def0d9fa0ff 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c
@@ -33,7 +33,6 @@
 #include "include/vector.h"
 #include "core_types.h"
 #include "dc_link_ddc.h"
-#include "engine.h"
 #include "aux_engine.h"
 
 #define AUX_POWER_UP_WA_DELAY 500
@@ -640,7 +639,6 @@ int dc_link_aux_transfer(struct ddc_service *ddc,
 		enum i2caux_transaction_action action)
 {
 	struct ddc *ddc_pin = ddc->ddc_pin;
-	struct engine *engine;
 	struct aux_engine *aux_engine;
 	enum aux_channel_operation_result operation_result;
 	struct aux_request_transaction_data aux_req;
@@ -652,8 +650,8 @@ int dc_link_aux_transfer(struct ddc_service *ddc,
 	memset(&aux_req, 0, sizeof(aux_req));
 	memset(&aux_rep, 0, sizeof(aux_rep));
 
-	engine = ddc->ctx->dc->res_pool->engines[ddc_pin->pin_data->en];
-	aux_engine = engine->funcs->acquire(engine, ddc_pin);
+	aux_engine = ddc->ctx->dc->res_pool->engines[ddc_pin->pin_data->en];
+	aux_engine->funcs->acquire(aux_engine, ddc_pin);
 
 	aux_req.type = type;
 	aux_req.action = action;
@@ -685,7 +683,7 @@ int dc_link_aux_transfer(struct ddc_service *ddc,
 		res = -1;
 		break;
 	}
-	aux_engine->base.funcs->release_engine(&aux_engine->base);
+	aux_engine->funcs->release_engine(aux_engine);
 	return res;
 }
 
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
index af9386ee5a93..160841da72a7 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
@@ -1088,7 +1088,8 @@ static struct dc_link_settings get_max_link_cap(struct dc_link *link)
 
 bool dp_verify_link_cap(
 	struct dc_link *link,
-	struct dc_link_settings *known_limit_link_setting)
+	struct dc_link_settings *known_limit_link_setting,
+	int *fail_count)
 {
 	struct dc_link_settings max_link_cap = {0};
 	struct dc_link_settings cur_link_setting = {0};
@@ -1160,6 +1161,8 @@ bool dp_verify_link_cap(
 				skip_video_pattern);
 		if (status == LINK_TRAINING_SUCCESS)
 			success = true;
+		else
+			(*fail_count)++;
 	}
 
 	if (success)
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_surface.c b/drivers/gpu/drm/amd/display/dc/core/dc_surface.c
index 815dfb50089b..8fb3aefd195c 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_surface.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_surface.c
@@ -192,7 +192,7 @@ void dc_transfer_func_release(struct dc_transfer_func *tf)
 	kref_put(&tf->refcount, dc_transfer_func_free);
 }
 
-struct dc_transfer_func *dc_create_transfer_func()
+struct dc_transfer_func *dc_create_transfer_func(void)
 {
 	struct dc_transfer_func *tf = kvzalloc(sizeof(*tf), GFP_KERNEL);
 
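The added (void) is more than style: in pre-C23 C an empty parameter list leaves the arguments unspecified, so a miscall compiles silently, while (void) is a real prototype that lets the compiler reject it. Declarations-only illustration:

    /* C, not C++: "T f()" takes unspecified arguments, "T f(void)" takes none */
    struct dc_transfer_func;

    struct dc_transfer_func *old_style();      /* old_style(1, 2) still compiles */
    struct dc_transfer_func *new_style(void);  /* new_style(1) is a compile error */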
diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h
index b91f14989aef..55bcc3bdc6a3 100644
--- a/drivers/gpu/drm/amd/display/dc/dc.h
+++ b/drivers/gpu/drm/amd/display/dc/dc.h
@@ -38,7 +38,7 @@
 #include "inc/compressor.h"
 #include "dml/display_mode_lib.h"
 
-#define DC_VER "3.1.58"
+#define DC_VER "3.1.59"
 
 #define MAX_SURFACES 3
 #define MAX_STREAMS 6
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c b/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c
index b28e2120767e..3f5b2e6f7553 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c
@@ -28,12 +28,12 @@
 #include "dce/dce_11_0_sh_mask.h"
 
 #define CTX \
-	aux110->base.base.ctx
+	aux110->base.ctx
 #define REG(reg_name)\
 	(aux110->regs->reg_name)
 
 #define DC_LOGGER \
-	engine->base.ctx->logger
+	engine->ctx->logger
 
 #include "reg_helper.h"
 
@@ -51,9 +51,9 @@ enum {
 	AUX_DEFER_RETRY_COUNTER = 6
 };
 static void release_engine(
-	struct engine *engine)
+	struct aux_engine *engine)
 {
-	struct aux_engine_dce110 *aux110 = FROM_ENGINE(engine);
+	struct aux_engine_dce110 *aux110 = FROM_AUX_ENGINE(engine);
 
 	dal_ddc_close(engine->ddc);
 
@@ -827,22 +827,21 @@ static bool end_of_transaction_command(
 
 	/* according Syed, it does not need now DoDummyMOT */
 }
-bool submit_request(
-	struct engine *engine,
+static bool submit_request(
+	struct aux_engine *engine,
 	struct i2caux_transaction_request *request,
 	bool middle_of_transaction)
 {
-	struct aux_engine *aux_engine = FROM_AUX_ENGINE_ENGINE(engine);
 
 	bool result;
 	bool mot_used = true;
 
 	switch (request->operation) {
 	case I2CAUX_TRANSACTION_READ:
-		result = read_command(aux_engine, request, mot_used);
+		result = read_command(engine, request, mot_used);
 		break;
 	case I2CAUX_TRANSACTION_WRITE:
-		result = write_command(aux_engine, request, mot_used);
+		result = write_command(engine, request, mot_used);
 		break;
 	default:
 		result = false;
@@ -854,45 +853,45 @@ bool submit_request(
 	 */
 
 	if (!middle_of_transaction || !result)
-		end_of_transaction_command(aux_engine, request);
+		end_of_transaction_command(engine, request);
 
 	/* mask AUX interrupt */
 
 	return result;
 }
 enum i2caux_engine_type get_engine_type(
-	const struct engine *engine)
+	const struct aux_engine *engine)
 {
 	return I2CAUX_ENGINE_TYPE_AUX;
 }
 
-static struct aux_engine *acquire(
-	struct engine *engine,
+static bool acquire(
+	struct aux_engine *engine,
 	struct ddc *ddc)
 {
-	struct aux_engine *aux_engine = FROM_AUX_ENGINE_ENGINE(engine);
+
 	enum gpio_result result;
 
-	if (aux_engine->funcs->is_engine_available) {
+	if (engine->funcs->is_engine_available) {
 		/*check whether SW could use the engine*/
-		if (!aux_engine->funcs->is_engine_available(aux_engine))
-			return NULL;
+		if (!engine->funcs->is_engine_available(engine))
+			return false;
 	}
 
 	result = dal_ddc_open(ddc, GPIO_MODE_HARDWARE,
 		GPIO_DDC_CONFIG_TYPE_MODE_AUX);
 
 	if (result != GPIO_RESULT_OK)
-		return NULL;
+		return false;
 
-	if (!aux_engine->funcs->acquire_engine(aux_engine)) {
+	if (!engine->funcs->acquire_engine(engine)) {
 		dal_ddc_close(ddc);
-		return NULL;
+		return false;
 	}
 
 	engine->ddc = ddc;
 
-	return aux_engine;
+	return true;
 }
 
 static const struct aux_engine_funcs aux_engine_funcs = {
@@ -902,9 +901,6 @@ static const struct aux_engine_funcs aux_engine_funcs = {
 	.read_channel_reply = read_channel_reply,
 	.get_channel_status = get_channel_status,
 	.is_engine_available = is_engine_available,
-};
-
-static const struct engine_funcs engine_funcs = {
 	.release_engine = release_engine,
 	.destroy_engine = dce110_engine_destroy,
 	.submit_request = submit_request,
@@ -912,10 +908,10 @@ static const struct engine_funcs engine_funcs = {
 	.acquire = acquire,
 };
 
-void dce110_engine_destroy(struct engine **engine)
+void dce110_engine_destroy(struct aux_engine **engine)
 {
 
-	struct aux_engine_dce110 *engine110 = FROM_ENGINE(*engine);
+	struct aux_engine_dce110 *engine110 = FROM_AUX_ENGINE(*engine);
 
 	kfree(engine110);
 	*engine = NULL;
@@ -927,13 +923,12 @@ struct aux_engine *dce110_aux_engine_construct(struct aux_engine_dce110 *aux_eng
 	uint32_t timeout_period,
 	const struct dce110_aux_registers *regs)
 {
-	aux_engine110->base.base.ddc = NULL;
-	aux_engine110->base.base.ctx = ctx;
+	aux_engine110->base.ddc = NULL;
+	aux_engine110->base.ctx = ctx;
 	aux_engine110->base.delay = 0;
 	aux_engine110->base.max_defer_write_retry = 0;
-	aux_engine110->base.base.funcs = &engine_funcs;
 	aux_engine110->base.funcs = &aux_engine_funcs;
-	aux_engine110->base.base.inst = inst;
+	aux_engine110->base.inst = inst;
 	aux_engine110->timeout_period = timeout_period;
 	aux_engine110->regs = regs;
 
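This file is the mechanical half of deleting the engine base "class": struct aux_engine used to embed struct engine, so everything bounced through base.base and a two-step FROM_* downcast. A standalone sketch of struct-embedding inheritance and the container_of-style macro that FROM_AUX_ENGINE() amounts to (field layout invented for illustration):

    #include <stddef.h>
    #include <stdio.h>

    /* after the flattening: aux_engine carries the former base fields */
    struct aux_engine { int inst; int delay; };
    struct aux_engine_dce110 { struct aux_engine base; };

    /* container_of-style downcast, in the spirit of FROM_AUX_ENGINE() */
    #define FROM_AUX_ENGINE(ptr) \
        ((struct aux_engine_dce110 *)((char *)(ptr) - \
            offsetof(struct aux_engine_dce110, base)))

    int main(void)
    {
        struct aux_engine_dce110 hw = { .base = { .inst = 3, .delay = 0 } };
        struct aux_engine *eng = &hw.base;            /* implicit upcast */

        printf("inst %d\n", FROM_AUX_ENGINE(eng)->base.inst);
        return 0;
    }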
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_aux.h b/drivers/gpu/drm/amd/display/dc/dce/dce_aux.h
index c6b2aec2e367..f7caab85dc80 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_aux.h
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_aux.h
@@ -103,9 +103,9 @@ struct aux_engine *dce110_aux_engine_construct(
 	uint32_t timeout_period,
 	const struct dce110_aux_registers *regs);
 
-void dce110_engine_destroy(struct engine **engine);
+void dce110_engine_destroy(struct aux_engine **engine);
 
 bool dce110_aux_engine_acquire(
-	struct engine *aux_engine,
+	struct aux_engine *aux_engine,
 	struct ddc *ddc);
 #endif
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c b/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c
index 062a46543887..ca7989e4932b 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c
@@ -150,7 +150,7 @@ static void dce_dmcu_set_psr_enable(struct dmcu *dmcu, bool enable, bool wait)
 	}
 }
 
-static void dce_dmcu_setup_psr(struct dmcu *dmcu,
+static bool dce_dmcu_setup_psr(struct dmcu *dmcu,
 		struct dc_link *link,
 		struct psr_context *psr_context)
 {
@@ -261,6 +261,8 @@ static void dce_dmcu_setup_psr(struct dmcu *dmcu,
 
 	/* notifyDMCUMsg */
 	REG_UPDATE(MASTER_COMM_CNTL_REG, MASTER_COMM_INTERRUPT, 1);
+
+	return true;
 }
 
 static bool dce_is_dmcu_initialized(struct dmcu *dmcu)
@@ -545,24 +547,25 @@ static void dcn10_dmcu_set_psr_enable(struct dmcu *dmcu, bool enable, bool wait)
 	 * least a few frames. Should never hit the max retry assert below.
 	 */
 	if (wait == true) {
 		for (retryCount = 0; retryCount <= 1000; retryCount++) {
 			dcn10_get_dmcu_psr_state(dmcu, &psr_state);
 			if (enable) {
 				if (psr_state != 0)
 					break;
 			} else {
 				if (psr_state == 0)
 					break;
+			}
+			udelay(500);
 		}
-		udelay(500);
-	}
 
 		/* assert if max retry hit */
-		ASSERT(retryCount <= 1000);
+		if (retryCount >= 1000)
+			ASSERT(0);
 	}
 }
 
-static void dcn10_dmcu_setup_psr(struct dmcu *dmcu,
+static bool dcn10_dmcu_setup_psr(struct dmcu *dmcu,
 		struct dc_link *link,
 		struct psr_context *psr_context)
 {
@@ -577,7 +580,7 @@ static void dcn10_dmcu_setup_psr(struct dmcu *dmcu,
 
 	/* If microcontroller is not running, do nothing */
 	if (dmcu->dmcu_state != DMCU_RUNNING)
-		return;
+		return false;
 
 	link->link_enc->funcs->psr_program_dp_dphy_fast_training(link->link_enc,
 			psr_context->psrExitLinkTrainingRequired);
@@ -677,6 +680,11 @@ static void dcn10_dmcu_setup_psr(struct dmcu *dmcu,
 
 	/* notifyDMCUMsg */
 	REG_UPDATE(MASTER_COMM_CNTL_REG, MASTER_COMM_INTERRUPT, 1);
+
+	/* waitDMCUReadyForCmd */
+	REG_WAIT(MASTER_COMM_CNTL_REG, MASTER_COMM_INTERRUPT, 0, 1, 10000);
+
+	return true;
 }
 
 static void dcn10_psr_wait_loop(
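The added waitDMCUReadyForCmd step polls MASTER_COMM_INTERRUPT back to 0 (roughly every 1 us, up to 10000 tries) before claiming success. A standalone model of such a bounded register poll (the fake register clears itself after a few reads):

    #include <stdbool.h>
    #include <stdio.h>

    static int reads_left = 3;          /* hypothetical HW: clears on 3rd read */

    static unsigned read_master_comm_interrupt(void)
    {
        return --reads_left > 0;        /* returns 1, 1, then 0 */
    }

    static bool wait_for_bit_clear(int retries)
    {
        while (retries--) {
            if (read_master_comm_interrupt() == 0)
                return true;
            /* the real macro sleeps ~1 us between polls */
        }
        return false;
    }

    int main(void)
    {
        puts(wait_for_bit_clear(10000) ? "DMCU ready" : "timeout");
        return 0;
    }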
diff --git a/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c b/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c
index c34c9531915e..fd2bdae4dcec 100644
--- a/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c
@@ -586,7 +586,7 @@ struct output_pixel_processor *dce100_opp_create(
 	return &opp->base;
 }
 
-struct engine *dce100_aux_engine_create(
+struct aux_engine *dce100_aux_engine_create(
 	struct dc_context *ctx,
 	uint32_t inst)
 {
@@ -600,7 +600,7 @@ struct engine *dce100_aux_engine_create(
 		SW_AUX_TIMEOUT_PERIOD_MULTIPLIER * AUX_TIMEOUT_PERIOD,
 		&aux_engine_regs[inst]);
 
-	return &aux_engine->base.base;
+	return &aux_engine->base;
 }
 
 struct clock_source *dce100_clock_source_create(
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
index 33a14e163f88..1149c413f6d2 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
@@ -2552,7 +2552,7 @@ static void pplib_apply_display_requirements(
 	dc->prev_display_config = *pp_display_cfg;
 }
 
-static void dce110_set_bandwidth(
+void dce110_set_bandwidth(
 	struct dc *dc,
 	struct dc_state *context,
 	bool decrease_allowed)
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.h
index d6db3dbd9015..e4c5db75c4c6 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.h
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.h
@@ -68,6 +68,11 @@ void dce110_fill_display_configs(
 	const struct dc_state *context,
 	struct dm_pp_display_configuration *pp_display_cfg);
 
+void dce110_set_bandwidth(
+	struct dc *dc,
+	struct dc_state *context,
+	bool decrease_allowed);
+
 uint32_t dce110_get_min_vblank_time_us(const struct dc_state *context);
 
 void dp_receiver_power_ctrl(struct dc_link *link, bool on);
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c
index 4a665a29191b..e5e9e92521e9 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c
@@ -604,7 +604,7 @@ static struct output_pixel_processor *dce110_opp_create(
 	return &opp->base;
 }
 
-struct engine *dce110_aux_engine_create(
+struct aux_engine *dce110_aux_engine_create(
 	struct dc_context *ctx,
 	uint32_t inst)
 {
@@ -618,7 +618,7 @@ struct engine *dce110_aux_engine_create(
 		SW_AUX_TIMEOUT_PERIOD_MULTIPLIER * AUX_TIMEOUT_PERIOD,
 		&aux_engine_regs[inst]);
 
-	return &aux_engine->base.base;
+	return &aux_engine->base;
 }
 
 struct clock_source *dce110_clock_source_create(
diff --git a/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c b/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c
index caf90ae2cbb0..84a05ff2d674 100644
--- a/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c
@@ -604,7 +604,7 @@ struct output_pixel_processor *dce112_opp_create(
 	return &opp->base;
 }
 
-struct engine *dce112_aux_engine_create(
+struct aux_engine *dce112_aux_engine_create(
 	struct dc_context *ctx,
 	uint32_t inst)
 {
@@ -618,7 +618,7 @@ struct engine *dce112_aux_engine_create(
 		SW_AUX_TIMEOUT_PERIOD_MULTIPLIER * AUX_TIMEOUT_PERIOD,
 		&aux_engine_regs[inst]);
 
-	return &aux_engine->base.base;
+	return &aux_engine->base;
 }
 
 struct clock_source *dce112_clock_source_create(
diff --git a/drivers/gpu/drm/amd/display/dc/dce120/dce120_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce120/dce120_hw_sequencer.c
index e96ff86d2fc3..5853522a6182 100644
--- a/drivers/gpu/drm/amd/display/dc/dce120/dce120_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dce120/dce120_hw_sequencer.c
@@ -244,7 +244,16 @@ static void dce120_update_dchub(
 	dh_data->dchub_info_valid = false;
 }
 
+static void dce120_set_bandwidth(
+		struct dc *dc,
+		struct dc_state *context,
+		bool decrease_allowed)
+{
+	if (context->stream_count <= 0)
+		return;
 
+	dce110_set_bandwidth(dc, context, decrease_allowed);
+}
 
 void dce120_hw_sequencer_construct(struct dc *dc)
 {
@@ -254,5 +263,6 @@ void dce120_hw_sequencer_construct(struct dc *dc)
 	dce110_hw_sequencer_construct(dc);
 	dc->hwss.enable_display_power_gating = dce120_enable_display_power_gating;
 	dc->hwss.update_dchub = dce120_update_dchub;
+	dc->hwss.set_bandwidth = dce120_set_bandwidth;
 }
 
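dce120 now installs its own set_bandwidth hook that filters out contexts with no streams and otherwise delegates to the dce110 implementation exported in the previous two hunks. The override-and-delegate shape, modeled standalone (names shortened, stream counts invented):

    #include <stdio.h>

    struct hwss { void (*set_bandwidth)(int stream_count); };

    static void dce110_set_bw(int stream_count)
    {
        printf("programming bandwidth for %d stream(s)\n", stream_count);
    }

    static void dce120_set_bw(int stream_count)
    {
        if (stream_count <= 0)
            return;                     /* guard: nothing to program */
        dce110_set_bw(stream_count);    /* delegate to the base sequencer */
    }

    int main(void)
    {
        struct hwss hwss = { .set_bandwidth = dce120_set_bw };

        hwss.set_bandwidth(0);          /* filtered out */
        hwss.set_bandwidth(2);          /* forwarded */
        return 0;
    }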
diff --git a/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c b/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c
index f7d02f2190d3..61d8e22d23c9 100644
--- a/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c
@@ -376,7 +376,7 @@ struct output_pixel_processor *dce120_opp_create(
 		ctx, inst, &opp_regs[inst], &opp_shift, &opp_mask);
 	return &opp->base;
 }
-struct engine *dce120_aux_engine_create(
+struct aux_engine *dce120_aux_engine_create(
 	struct dc_context *ctx,
 	uint32_t inst)
 {
@@ -390,7 +390,7 @@ struct engine *dce120_aux_engine_create(
 		SW_AUX_TIMEOUT_PERIOD_MULTIPLIER * AUX_TIMEOUT_PERIOD,
 		&aux_engine_regs[inst]);
 
-	return &aux_engine->base.base;
+	return &aux_engine->base;
 }
 
 static const struct bios_registers bios_regs = {
diff --git a/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c b/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c
index 6fb33ad2d3c8..dc9f3e9afc33 100644
--- a/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c
@@ -464,7 +464,7 @@ static struct output_pixel_processor *dce80_opp_create(
 	return &opp->base;
 }
 
-struct engine *dce80_aux_engine_create(
+struct aux_engine *dce80_aux_engine_create(
 	struct dc_context *ctx,
 	uint32_t inst)
 {
@@ -478,7 +478,7 @@ struct engine *dce80_aux_engine_create(
 		SW_AUX_TIMEOUT_PERIOD_MULTIPLIER * AUX_TIMEOUT_PERIOD,
 		&aux_engine_regs[inst]);
 
-	return &aux_engine->base.base;
+	return &aux_engine->base;
 }
 
 static struct stream_encoder *dce80_stream_encoder_create(
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
index c39934f8677f..6b44ed3697a4 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
@@ -594,7 +594,7 @@ static struct output_pixel_processor *dcn10_opp_create(
 	return &opp->base;
 }
 
-struct engine *dcn10_aux_engine_create(
+struct aux_engine *dcn10_aux_engine_create(
 	struct dc_context *ctx,
 	uint32_t inst)
 {
@@ -608,7 +608,7 @@ struct engine *dcn10_aux_engine_create(
 		SW_AUX_TIMEOUT_PERIOD_MULTIPLIER * AUX_TIMEOUT_PERIOD,
 		&aux_engine_regs[inst]);
 
-	return &aux_engine->base.base;
+	return &aux_engine->base;
 }
 
 static struct mpc *dcn10_mpc_create(struct dc_context *ctx)
diff --git a/drivers/gpu/drm/amd/display/dc/inc/core_types.h b/drivers/gpu/drm/amd/display/dc/inc/core_types.h
index 0fa385872ed3..9f33306f9014 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/core_types.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/core_types.h
@@ -138,7 +138,7 @@ struct resource_pool {
 	struct output_pixel_processor *opps[MAX_PIPES];
 	struct timing_generator *timing_generators[MAX_PIPES];
 	struct stream_encoder *stream_enc[MAX_PIPES * 2];
-	struct engine *engines[MAX_PIPES];
+	struct aux_engine *engines[MAX_PIPES];
 	struct hubbub *hubbub;
 	struct mpc *mpc;
 	struct pp_smu_funcs_rv *pp_smu;
diff --git a/drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h b/drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h
index 697b5ee73845..a37255c757e0 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h
@@ -35,7 +35,8 @@ struct dc_link_settings;
 
 bool dp_verify_link_cap(
 	struct dc_link *link,
-	struct dc_link_settings *known_limit_link_setting);
+	struct dc_link_settings *known_limit_link_setting,
+	int *fail_count);
 
 bool dp_validate_mode_timing(
 	struct dc_link *link,
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/aux_engine.h b/drivers/gpu/drm/amd/display/dc/inc/hw/aux_engine.h
index 06d7e5d4cf21..e79cd4e92919 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/aux_engine.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/aux_engine.h
@@ -26,46 +26,72 @@
 #ifndef __DAL_AUX_ENGINE_H__
 #define __DAL_AUX_ENGINE_H__
 
-#include "engine.h"
+#include "dc_ddc_types.h"
 #include "include/i2caux_interface.h"
 
-struct aux_engine;
-union aux_config;
-struct aux_engine_funcs {
-	void (*destroy)(
-		struct aux_engine **ptr);
-	bool (*acquire_engine)(
-		struct aux_engine *engine);
-	void (*configure)(
-		struct aux_engine *engine,
-		union aux_config cfg);
-	void (*submit_channel_request)(
-		struct aux_engine *engine,
-		struct aux_request_transaction_data *request);
-	void (*process_channel_reply)(
-		struct aux_engine *engine,
-		struct aux_reply_transaction_data *reply);
-	int (*read_channel_reply)(
-		struct aux_engine *engine,
-		uint32_t size,
-		uint8_t *buffer,
-		uint8_t *reply_result,
-		uint32_t *sw_status);
-	enum aux_channel_operation_result (*get_channel_status)(
-		struct aux_engine *engine,
-		uint8_t *returned_bytes);
-	bool (*is_engine_available)(struct aux_engine *engine);
+enum i2caux_transaction_operation {
+	I2CAUX_TRANSACTION_READ,
+	I2CAUX_TRANSACTION_WRITE
+};
+
+enum i2caux_transaction_address_space {
+	I2CAUX_TRANSACTION_ADDRESS_SPACE_I2C = 1,
+	I2CAUX_TRANSACTION_ADDRESS_SPACE_DPCD
+};
+
+struct i2caux_transaction_payload {
+	enum i2caux_transaction_address_space address_space;
+	uint32_t address;
+	uint32_t length;
+	uint8_t *data;
+};
+
+enum i2caux_transaction_status {
+	I2CAUX_TRANSACTION_STATUS_UNKNOWN = (-1L),
+	I2CAUX_TRANSACTION_STATUS_SUCCEEDED,
+	I2CAUX_TRANSACTION_STATUS_FAILED_CHANNEL_BUSY,
+	I2CAUX_TRANSACTION_STATUS_FAILED_TIMEOUT,
+	I2CAUX_TRANSACTION_STATUS_FAILED_PROTOCOL_ERROR,
+	I2CAUX_TRANSACTION_STATUS_FAILED_NACK,
+	I2CAUX_TRANSACTION_STATUS_FAILED_INCOMPLETE,
+	I2CAUX_TRANSACTION_STATUS_FAILED_OPERATION,
+	I2CAUX_TRANSACTION_STATUS_FAILED_INVALID_OPERATION,
+	I2CAUX_TRANSACTION_STATUS_FAILED_BUFFER_OVERFLOW,
+	I2CAUX_TRANSACTION_STATUS_FAILED_HPD_DISCON
+};
+
+struct i2caux_transaction_request {
+	enum i2caux_transaction_operation operation;
+	struct i2caux_transaction_payload payload;
+	enum i2caux_transaction_status status;
+};
+
+enum i2caux_engine_type {
+	I2CAUX_ENGINE_TYPE_UNKNOWN = (-1L),
+	I2CAUX_ENGINE_TYPE_AUX,
+	I2CAUX_ENGINE_TYPE_I2C_DDC_HW,
+	I2CAUX_ENGINE_TYPE_I2C_GENERIC_HW,
+	I2CAUX_ENGINE_TYPE_I2C_SW
+};
+
+enum i2c_default_speed {
+	I2CAUX_DEFAULT_I2C_HW_SPEED = 50,
+	I2CAUX_DEFAULT_I2C_SW_SPEED = 50
 };
-struct engine;
+
+union aux_config;
+
 struct aux_engine {
-	struct engine base;
+	uint32_t inst;
+	struct ddc *ddc;
+	struct dc_context *ctx;
 	const struct aux_engine_funcs *funcs;
 	/* following values are expressed in milliseconds */
 	uint32_t delay;
 	uint32_t max_defer_write_retry;
-
 	bool acquire_reset;
 };
+
 struct read_command_context {
 	uint8_t *buffer;
 	uint32_t current_read_length;
@@ -86,6 +112,7 @@ struct read_command_context {
 	bool transaction_complete;
 	bool operation_succeeded;
 };
+
 struct write_command_context {
 	bool mot;
 
@@ -110,4 +137,44 @@ struct write_command_context {
 	bool transaction_complete;
 	bool operation_succeeded;
 };
+
+
+struct aux_engine_funcs {
+	void (*destroy)(
+		struct aux_engine **ptr);
+	bool (*acquire_engine)(
+		struct aux_engine *engine);
+	void (*configure)(
+		struct aux_engine *engine,
+		union aux_config cfg);
+	void (*submit_channel_request)(
+		struct aux_engine *engine,
+		struct aux_request_transaction_data *request);
+	void (*process_channel_reply)(
+		struct aux_engine *engine,
+		struct aux_reply_transaction_data *reply);
+	int (*read_channel_reply)(
+		struct aux_engine *engine,
+		uint32_t size,
+		uint8_t *buffer,
+		uint8_t *reply_result,
+		uint32_t *sw_status);
+	enum aux_channel_operation_result (*get_channel_status)(
+		struct aux_engine *engine,
+		uint8_t *returned_bytes);
+	bool (*is_engine_available)(struct aux_engine *engine);
+	enum i2caux_engine_type (*get_engine_type)(
+		const struct aux_engine *engine);
+	bool (*acquire)(
+		struct aux_engine *engine,
+		struct ddc *ddc);
+	bool (*submit_request)(
+		struct aux_engine *engine,
+		struct i2caux_transaction_request *request,
+		bool middle_of_transaction);
+	void (*release_engine)(
+		struct aux_engine *engine);
+	void (*destroy_engine)(
+		struct aux_engine **engine);
+};
 #endif
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dmcu.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dmcu.h
index de60f940030d..4550747fb61c 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/dmcu.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dmcu.h
@@ -48,7 +48,7 @@ struct dmcu_funcs {
 		const char *src,
 		unsigned int bytes);
 	void (*set_psr_enable)(struct dmcu *dmcu, bool enable, bool wait);
-	void (*setup_psr)(struct dmcu *dmcu,
+	bool (*setup_psr)(struct dmcu *dmcu,
 			struct dc_link *link,
 			struct psr_context *psr_context);
 	void (*get_psr_state)(struct dmcu *dmcu, uint32_t *psr_state);
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/engine.h b/drivers/gpu/drm/amd/display/dc/inc/hw/engine.h
deleted file mode 100644
index 1f5476f41236..000000000000
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/engine.h
+++ /dev/null
@@ -1,106 +0,0 @@
-/*
- * Copyright 2012-15 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: AMD
- *
- */
-
-#ifndef __DAL_ENGINE_H__
-#define __DAL_ENGINE_H__
-
-#include "dc_ddc_types.h"
-
-enum i2caux_transaction_operation {
-	I2CAUX_TRANSACTION_READ,
-	I2CAUX_TRANSACTION_WRITE
-};
-
-enum i2caux_transaction_address_space {
-	I2CAUX_TRANSACTION_ADDRESS_SPACE_I2C = 1,
-	I2CAUX_TRANSACTION_ADDRESS_SPACE_DPCD
-};
-
-struct i2caux_transaction_payload {
-	enum i2caux_transaction_address_space address_space;
-	uint32_t address;
-	uint32_t length;
-	uint8_t *data;
-};
-
-enum i2caux_transaction_status {
-	I2CAUX_TRANSACTION_STATUS_UNKNOWN = (-1L),
-	I2CAUX_TRANSACTION_STATUS_SUCCEEDED,
-	I2CAUX_TRANSACTION_STATUS_FAILED_CHANNEL_BUSY,
-	I2CAUX_TRANSACTION_STATUS_FAILED_TIMEOUT,
-	I2CAUX_TRANSACTION_STATUS_FAILED_PROTOCOL_ERROR,
-	I2CAUX_TRANSACTION_STATUS_FAILED_NACK,
-	I2CAUX_TRANSACTION_STATUS_FAILED_INCOMPLETE,
-	I2CAUX_TRANSACTION_STATUS_FAILED_OPERATION,
-	I2CAUX_TRANSACTION_STATUS_FAILED_INVALID_OPERATION,
-	I2CAUX_TRANSACTION_STATUS_FAILED_BUFFER_OVERFLOW,
-	I2CAUX_TRANSACTION_STATUS_FAILED_HPD_DISCON
-};
-
-struct i2caux_transaction_request {
-	enum i2caux_transaction_operation operation;
-	struct i2caux_transaction_payload payload;
-	enum i2caux_transaction_status status;
-};
-
-enum i2caux_engine_type {
-	I2CAUX_ENGINE_TYPE_UNKNOWN = (-1L),
-	I2CAUX_ENGINE_TYPE_AUX,
-	I2CAUX_ENGINE_TYPE_I2C_DDC_HW,
-	I2CAUX_ENGINE_TYPE_I2C_GENERIC_HW,
-	I2CAUX_ENGINE_TYPE_I2C_SW
-};
-
-enum i2c_default_speed {
-	I2CAUX_DEFAULT_I2C_HW_SPEED = 50,
-	I2CAUX_DEFAULT_I2C_SW_SPEED = 50
-};
-
-struct engine;
-
-struct engine_funcs {
-	enum i2caux_engine_type (*get_engine_type)(
-		const struct engine *engine);
-	struct aux_engine* (*acquire)(
-		struct engine *engine,
-		struct ddc *ddc);
-	bool (*submit_request)(
-		struct engine *engine,
-		struct i2caux_transaction_request *request,
-		bool middle_of_transaction);
-	void (*release_engine)(
-		struct engine *engine);
-	void (*destroy_engine)(
-		struct engine **engine);
-};
-
-struct engine {
-	const struct engine_funcs *funcs;
-	uint32_t inst;
-	struct ddc *ddc;
-	struct dc_context *ctx;
-};
-
-#endif
diff --git a/drivers/gpu/drm/amd/include/atomfirmware.h b/drivers/gpu/drm/amd/include/atomfirmware.h
index 33b4de4ad66e..4bc118df3bc4 100644
--- a/drivers/gpu/drm/amd/include/atomfirmware.h
+++ b/drivers/gpu/drm/amd/include/atomfirmware.h
@@ -1074,7 +1074,7 @@ struct atom_integrated_system_info_v1_11
 	uint16_t  dpphy_override;                   // bit vector, enum of atom_sysinfo_dpphy_override_def
 	uint16_t  lvds_misc;                        // enum of atom_sys_info_lvds_misc_def
 	uint16_t  backlight_pwm_hz;                 // pwm frequency in hz
-	uint8_t   memorytype;                       // enum of atom_sys_mem_type
+	uint8_t   memorytype;                       // enum of atom_dmi_t17_mem_type_def, APU memory type indication.
 	uint8_t   umachannelnumber;                 // number of memory channels
 	uint8_t   pwr_on_digon_to_de;               /* all pwr sequence numbers below are in uint of 4ms */
 	uint8_t   pwr_on_de_to_vary_bl;
@@ -1084,18 +1084,25 @@ struct atom_integrated_system_info_v1_11
 	uint8_t   pwr_on_vary_bl_to_blon;
 	uint8_t   pwr_down_bloff_to_vary_bloff;
 	uint8_t   min_allowed_bl_level;
+	uint8_t   htc_hyst_limit;
+	uint8_t   htc_tmp_limit;
+	uint8_t   reserved1;
+	uint8_t   reserved2;
 	struct atom_external_display_connection_info extdispconninfo;
 	struct atom_14nm_dpphy_dvihdmi_tuningset dvi_tuningset;
 	struct atom_14nm_dpphy_dvihdmi_tuningset hdmi_tuningset;
 	struct atom_14nm_dpphy_dvihdmi_tuningset hdmi6g_tuningset;
-	struct atom_14nm_dpphy_dp_tuningset dp_tuningset;
-	struct atom_14nm_dpphy_dp_tuningset dp_hbr3_tuningset;
+	struct atom_14nm_dpphy_dp_tuningset dp_tuningset;        // rbr 1.62G dp tuning set
+	struct atom_14nm_dpphy_dp_tuningset dp_hbr3_tuningset;   // HBR3 dp tuning set
 	struct atom_camera_data  camera_info;
 	struct atom_hdmi_retimer_redriver_set dp0_retimer_set;   //for DP0
 	struct atom_hdmi_retimer_redriver_set dp1_retimer_set;   //for DP1
 	struct atom_hdmi_retimer_redriver_set dp2_retimer_set;   //for DP2
 	struct atom_hdmi_retimer_redriver_set dp3_retimer_set;   //for DP3
-	uint32_t  reserved[108];
+	struct atom_14nm_dpphy_dp_tuningset dp_hbr_tuningset;    //hbr 2.7G dp tuning set
+	struct atom_14nm_dpphy_dp_tuningset dp_hbr2_tuningset;   //hbr2 5.4G dp turnig set
+	struct atom_14nm_dpphy_dp_tuningset edp_tuningset;       //edp tuning set
+	uint32_t  reserved[66];
 };
 
 
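Both hunks follow the usual firmware-ABI convention: new members are carved out of the reserved[] tail (here reserved[108] shrinks to reserved[66]) so the overall struct size, and therefore the ABI, never changes. A self-contained sketch of that convention with a compile-time size check (field sizes invented):

    #include <stdint.h>

    struct fw_info_v1 {
        uint32_t feature_a;
        uint32_t reserved[7];       /* 32 bytes total */
    };

    struct fw_info_v2 {
        uint32_t feature_a;
        uint32_t feature_b;         /* took one dword from reserved[] */
        uint32_t reserved[6];
    };

    _Static_assert(sizeof(struct fw_info_v1) == sizeof(struct fw_info_v2),
                   "firmware ABI struct size must stay fixed");

    int main(void) { return 0; }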
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_powertune.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_powertune.c
index c952845833d7..5e19f5977eb1 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_powertune.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_powertune.c
@@ -403,6 +403,49 @@ static const struct gpu_pt_config_reg DIDTConfig_Polaris12[] = {
 	{ ixDIDT_SQ_CTRL1, DIDT_SQ_CTRL1__MAX_POWER_MASK, DIDT_SQ_CTRL1__MAX_POWER__SHIFT, 0xffff, GPU_CONFIGREG_DIDT_IND },
 
 	{ ixDIDT_SQ_CTRL_OCP, DIDT_SQ_CTRL_OCP__UNUSED_0_MASK, DIDT_SQ_CTRL_OCP__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+	{ ixDIDT_SQ_CTRL_OCP, DIDT_SQ_CTRL_OCP__OCP_MAX_POWER_MASK, DIDT_SQ_CTRL_OCP__OCP_MAX_POWER__SHIFT, 0xffff, GPU_CONFIGREG_DIDT_IND },
+
+	{ ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__MAX_POWER_DELTA_MASK, DIDT_SQ_CTRL2__MAX_POWER_DELTA__SHIFT, 0x3853, GPU_CONFIGREG_DIDT_IND },
+	{ ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__UNUSED_0_MASK, DIDT_SQ_CTRL2__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+	{ ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__SHORT_TERM_INTERVAL_SIZE_MASK, DIDT_SQ_CTRL2__SHORT_TERM_INTERVAL_SIZE__SHIFT, 0x005a, GPU_CONFIGREG_DIDT_IND },
+	{ ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__UNUSED_1_MASK, DIDT_SQ_CTRL2__UNUSED_1__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+	{ ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__LONG_TERM_INTERVAL_RATIO_MASK, DIDT_SQ_CTRL2__LONG_TERM_INTERVAL_RATIO__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+	{ ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__UNUSED_2_MASK, DIDT_SQ_CTRL2__UNUSED_2__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+
+	{ ixDIDT_SQ_STALL_CTRL, DIDT_SQ_STALL_CTRL__DIDT_STALL_CTRL_ENABLE_MASK, DIDT_SQ_STALL_CTRL__DIDT_STALL_CTRL_ENABLE__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND },
+	{ ixDIDT_SQ_STALL_CTRL, DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_HI_MASK, DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_HI__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND },
+	{ ixDIDT_SQ_STALL_CTRL, DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_LO_MASK, DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_LO__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND },
+	{ ixDIDT_SQ_STALL_CTRL, DIDT_SQ_STALL_CTRL__DIDT_HI_POWER_THRESHOLD_MASK, DIDT_SQ_STALL_CTRL__DIDT_HI_POWER_THRESHOLD__SHIFT, 0x0ebb, GPU_CONFIGREG_DIDT_IND },
+	{ ixDIDT_SQ_STALL_CTRL, DIDT_SQ_STALL_CTRL__UNUSED_0_MASK, DIDT_SQ_STALL_CTRL__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+
+	{ ixDIDT_SQ_TUNING_CTRL, DIDT_SQ_TUNING_CTRL__DIDT_TUNING_ENABLE_MASK, DIDT_SQ_TUNING_CTRL__DIDT_TUNING_ENABLE__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND },
+	{ ixDIDT_SQ_TUNING_CTRL, DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_HI_MASK, DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_HI__SHIFT, 0x3853, GPU_CONFIGREG_DIDT_IND },
+	{ ixDIDT_SQ_TUNING_CTRL, DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_LO_MASK, DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_LO__SHIFT, 0x3153, GPU_CONFIGREG_DIDT_IND },
+	{ ixDIDT_SQ_TUNING_CTRL, DIDT_SQ_TUNING_CTRL__UNUSED_0_MASK, DIDT_SQ_TUNING_CTRL__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+
+	{ ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_CTRL_EN_MASK, DIDT_SQ_CTRL0__DIDT_CTRL_EN__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND },
+	{ ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__USE_REF_CLOCK_MASK, DIDT_SQ_CTRL0__USE_REF_CLOCK__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+	{ ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__PHASE_OFFSET_MASK, DIDT_SQ_CTRL0__PHASE_OFFSET__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+	{ ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_CTRL_RST_MASK, DIDT_SQ_CTRL0__DIDT_CTRL_RST__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+	{ ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_CLK_EN_OVERRIDE_MASK, DIDT_SQ_CTRL0__DIDT_CLK_EN_OVERRIDE__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+	{ ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI_MASK, DIDT_SQ_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI__SHIFT, 0x0010, GPU_CONFIGREG_DIDT_IND },
+	{ ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO_MASK, DIDT_SQ_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO__SHIFT, 0x0010, GPU_CONFIGREG_DIDT_IND },
+	{ ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__UNUSED_0_MASK, DIDT_SQ_CTRL0__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+
+	{ ixDIDT_TD_WEIGHT0_3, DIDT_TD_WEIGHT0_3__WEIGHT0_MASK, DIDT_TD_WEIGHT0_3__WEIGHT0__SHIFT, 0x000a, GPU_CONFIGREG_DIDT_IND },
+	{ ixDIDT_TD_WEIGHT0_3, DIDT_TD_WEIGHT0_3__WEIGHT1_MASK, DIDT_TD_WEIGHT0_3__WEIGHT1__SHIFT, 0x0010, GPU_CONFIGREG_DIDT_IND },
+	{ ixDIDT_TD_WEIGHT0_3, DIDT_TD_WEIGHT0_3__WEIGHT2_MASK, DIDT_TD_WEIGHT0_3__WEIGHT2__SHIFT, 0x0017, GPU_CONFIGREG_DIDT_IND },
+	{ ixDIDT_TD_WEIGHT0_3, DIDT_TD_WEIGHT0_3__WEIGHT3_MASK, DIDT_TD_WEIGHT0_3__WEIGHT3__SHIFT, 0x002f, GPU_CONFIGREG_DIDT_IND },
+
+	{ ixDIDT_TD_WEIGHT4_7, DIDT_TD_WEIGHT4_7__WEIGHT4_MASK, DIDT_TD_WEIGHT4_7__WEIGHT4__SHIFT, 0x0046, GPU_CONFIGREG_DIDT_IND },
+	{ ixDIDT_TD_WEIGHT4_7, DIDT_TD_WEIGHT4_7__WEIGHT5_MASK, DIDT_TD_WEIGHT4_7__WEIGHT5__SHIFT, 0x005d, GPU_CONFIGREG_DIDT_IND },
+	{ ixDIDT_TD_WEIGHT4_7, DIDT_TD_WEIGHT4_7__WEIGHT6_MASK, DIDT_TD_WEIGHT4_7__WEIGHT6__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+	{ ixDIDT_TD_WEIGHT4_7, DIDT_TD_WEIGHT4_7__WEIGHT7_MASK, DIDT_TD_WEIGHT4_7__WEIGHT7__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+
+	{ ixDIDT_TD_CTRL1, DIDT_TD_CTRL1__MIN_POWER_MASK, DIDT_TD_CTRL1__MIN_POWER__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+	{ ixDIDT_TD_CTRL1, DIDT_TD_CTRL1__MAX_POWER_MASK, DIDT_TD_CTRL1__MAX_POWER__SHIFT, 0xffff, GPU_CONFIGREG_DIDT_IND },
+
+	{ ixDIDT_TD_CTRL_OCP, DIDT_TD_CTRL_OCP__UNUSED_0_MASK, DIDT_TD_CTRL_OCP__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
 	{ ixDIDT_TD_CTRL_OCP, DIDT_TD_CTRL_OCP__OCP_MAX_POWER_MASK, DIDT_TD_CTRL_OCP__OCP_MAX_POWER__SHIFT, 0x00ff, GPU_CONFIGREG_DIDT_IND },
 
 	{ ixDIDT_TD_CTRL2, DIDT_TD_CTRL2__MAX_POWER_DELTA_MASK, DIDT_TD_CTRL2__MAX_POWER_DELTA__SHIFT, 0x3fff, GPU_CONFIGREG_DIDT_IND },
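Each row of DIDTConfig_Polaris12[] is a (register, mask, shift, value, type) tuple that the hwmgr applies as a read-modify-write over the indirect DIDT register space. A minimal sketch of that application, assuming those field names on struct gpu_pt_config_reg and using hypothetical stand-ins for the driver's indirect-register accessors:

    /* Sketch only: read_ind_reg()/write_ind_reg() are hypothetical stand-ins
     * for the driver's indirect-register helpers. */
    static void apply_didt_entry(const struct gpu_pt_config_reg *cfg)
    {
        uint32_t data = read_ind_reg(cfg->offset);

        data &= ~cfg->mask;                             /* clear the field   */
        data |= (cfg->value << cfg->shift) & cfg->mask; /* insert new value  */
        write_ind_reg(cfg->offset, data);
    }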
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c
index 288802f209dd..0adfc5392cd3 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c
@@ -244,6 +244,7 @@ static int smu8_initialize_dpm_defaults(struct pp_hwmgr *hwmgr)
 	return 0;
 }
 
+/* convert from 8bit vid to real voltage in mV*4 */
 static uint32_t smu8_convert_8Bit_index_to_voltage(
 			struct pp_hwmgr *hwmgr, uint16_t voltage)
 {
@@ -1702,13 +1703,13 @@ static int smu8_read_sensor(struct pp_hwmgr *hwmgr, int idx,
 	case AMDGPU_PP_SENSOR_VDDNB:
 		tmp = (cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixSMUSVI_NB_CURRENTVID) &
 			CURRENT_NB_VID_MASK) >> CURRENT_NB_VID__SHIFT;
-		vddnb = smu8_convert_8Bit_index_to_voltage(hwmgr, tmp);
+		vddnb = smu8_convert_8Bit_index_to_voltage(hwmgr, tmp) / 4;
 		*((uint32_t *)value) = vddnb;
 		return 0;
 	case AMDGPU_PP_SENSOR_VDDGFX:
 		tmp = (cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixSMUSVI_GFX_CURRENTVID) &
 			CURRENT_GFX_VID_MASK) >> CURRENT_GFX_VID__SHIFT;
-		vddgfx = smu8_convert_8Bit_index_to_voltage(hwmgr, (u16)tmp);
+		vddgfx = smu8_convert_8Bit_index_to_voltage(hwmgr, (u16)tmp) / 4;
 		*((uint32_t *)value) = vddgfx;
 		return 0;
 	case AMDGPU_PP_SENSOR_UVD_VCLK:
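Per the new comment, the VID helper returns millivolts pre-scaled by 4, so both sensor paths now divide by 4 before reporting. A two-line sketch of the unit handling (the vid variable is illustrative):

    uint32_t scaled = smu8_convert_8Bit_index_to_voltage(hwmgr, vid); /* mV * 4  */
    uint32_t mv     = scaled / 4;                                     /* plain mV */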
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c
index a4ce199af475..1276f168ff68 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c
@@ -1204,7 +1204,6 @@ static int polaris10_populate_smc_acpi_level(struct pp_hwmgr *hwmgr,
 		(struct phm_ppt_v1_information *)(hwmgr->pptable);
 	SMIO_Pattern vol_level;
 	uint32_t mvdd;
-	uint16_t us_mvdd;
 
 	table->ACPILevel.Flags &= ~PPSMC_SWSTATE_FLAG_DC;
 
@@ -1255,16 +1254,11 @@ static int polaris10_populate_smc_acpi_level(struct pp_hwmgr *hwmgr,
1255 "in Clock Dependency Table", 1254 "in Clock Dependency Table",
1256 ); 1255 );
1257 1256
1258 us_mvdd = 0; 1257 if (!((SMU7_VOLTAGE_CONTROL_NONE == data->mvdd_control) ||
1259 if ((SMU7_VOLTAGE_CONTROL_NONE == data->mvdd_control) || 1258 (data->mclk_dpm_key_disabled)))
1260 (data->mclk_dpm_key_disabled)) 1259 polaris10_populate_mvdd_value(hwmgr,
1261 us_mvdd = data->vbios_boot_state.mvdd_bootup_value;
1262 else {
1263 if (!polaris10_populate_mvdd_value(hwmgr,
1264 data->dpm_table.mclk_table.dpm_levels[0].value, 1260 data->dpm_table.mclk_table.dpm_levels[0].value,
1265 &vol_level)) 1261 &vol_level);
1266 us_mvdd = vol_level.Voltage;
1267 }
1268 1262
1269 if (0 == polaris10_populate_mvdd_value(hwmgr, 0, &vol_level)) 1263 if (0 == polaris10_populate_mvdd_value(hwmgr, 0, &vol_level))
1270 table->MemoryACPILevel.MinMvdd = PP_HOST_TO_SMC_UL(vol_level.Voltage); 1264 table->MemoryACPILevel.MinMvdd = PP_HOST_TO_SMC_UL(vol_level.Voltage);
@@ -1517,7 +1511,7 @@ static int polaris10_populate_clock_stretcher_data_table(struct pp_hwmgr *hwmgr)
 	uint32_t ro, efuse, volt_without_cks, volt_with_cks, value, max, min;
 	struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend);
 
-	uint8_t i, stretch_amount, stretch_amount2, volt_offset = 0;
+	uint8_t i, stretch_amount, volt_offset = 0;
 	struct phm_ppt_v1_information *table_info =
 			(struct phm_ppt_v1_information *)(hwmgr->pptable);
 	struct phm_ppt_v1_clock_voltage_dependency_table *sclk_table =
@@ -1568,11 +1562,7 @@ static int polaris10_populate_clock_stretcher_data_table(struct pp_hwmgr *hwmgr)
 
 	smu_data->smc_state_table.LdoRefSel = (table_info->cac_dtp_table->ucCKS_LDO_REFSEL != 0) ? table_info->cac_dtp_table->ucCKS_LDO_REFSEL : 6;
 	/* Populate CKS Lookup Table */
-	if (stretch_amount == 1 || stretch_amount == 2 || stretch_amount == 5)
-		stretch_amount2 = 0;
-	else if (stretch_amount == 3 || stretch_amount == 4)
-		stretch_amount2 = 1;
-	else {
+	if (stretch_amount == 0 || stretch_amount > 5) {
 		phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
 				PHM_PlatformCaps_ClockStretcher);
 		PP_ASSERT_WITH_CODE(false,
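The rewritten check accepts the same stretch_amount values as before: the old ladder mapped {1, 2, 5} to stretch_amount2 = 0 and {3, 4} to 1, erroring on everything else, and stretch_amount2 was never consumed afterwards. Rejecting the complement is therefore equivalent, as this condensed predicate shows (sketch, not driver code):

    /* old: valid iff stretch_amount is in {1,2,3,4,5} (via the dead ladder)
     * new: invalid iff stretch_amount == 0 || stretch_amount > 5           */
    bool valid = (stretch_amount >= 1 && stretch_amount <= 5);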
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
index edbb4cd519fd..ba2fd295697f 100644
--- a/drivers/gpu/drm/radeon/radeon_object.c
+++ b/drivers/gpu/drm/radeon/radeon_object.c
@@ -307,7 +307,7 @@ struct radeon_bo *radeon_bo_ref(struct radeon_bo *bo)
 	if (bo == NULL)
 		return NULL;
 
-	ttm_bo_reference(&bo->tbo);
+	ttm_bo_get(&bo->tbo);
 	return bo;
 }
 
@@ -320,9 +320,8 @@ void radeon_bo_unref(struct radeon_bo **bo)
 		return;
 	rdev = (*bo)->rdev;
 	tbo = &((*bo)->tbo);
-	ttm_bo_unref(&tbo);
-	if (tbo == NULL)
-		*bo = NULL;
+	ttm_bo_put(tbo);
+	*bo = NULL;
 }
 
 int radeon_bo_pin_restricted(struct radeon_bo *bo, u32 domain, u64 max_offset,
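ttm_bo_reference()/ttm_bo_unref() give way to the kref-style ttm_bo_get()/ttm_bo_put(). The behavioral difference is that ttm_bo_put() takes the pointer by value and does not clear the caller's variable, so radeon_bo_unref() now NULLs *bo itself. The resulting pattern, sketched:

    struct ttm_buffer_object *tbo = &((*bo)->tbo);

    ttm_bo_put(tbo);  /* drops the reference; may free the BO      */
    *bo = NULL;       /* caller clears its own pointer every time  */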
diff --git a/drivers/gpu/drm/scheduler/Makefile b/drivers/gpu/drm/scheduler/Makefile
index bd0377c0d2ee..7665883f81d4 100644
--- a/drivers/gpu/drm/scheduler/Makefile
+++ b/drivers/gpu/drm/scheduler/Makefile
@@ -20,7 +20,6 @@
 # OTHER DEALINGS IN THE SOFTWARE.
 #
 #
-ccflags-y := -Iinclude/drm
 gpu-sched-y := gpu_scheduler.o sched_fence.o
 
 obj-$(CONFIG_DRM_SCHED) += gpu-sched.o
diff --git a/drivers/gpu/drm/scheduler/gpu_scheduler.c b/drivers/gpu/drm/scheduler/gpu_scheduler.c
index 3f2fc5e8242a..1b733229201e 100644
--- a/drivers/gpu/drm/scheduler/gpu_scheduler.c
+++ b/drivers/gpu/drm/scheduler/gpu_scheduler.c
@@ -199,21 +199,6 @@ int drm_sched_entity_init(struct drm_sched_entity *entity,
 EXPORT_SYMBOL(drm_sched_entity_init);
 
 /**
- * drm_sched_entity_is_initialized - Query if entity is initialized
- *
- * @sched: Pointer to scheduler instance
- * @entity: The pointer to a valid scheduler entity
- *
- * return true if entity is initialized, false otherwise
-*/
-static bool drm_sched_entity_is_initialized(struct drm_gpu_scheduler *sched,
-					    struct drm_sched_entity *entity)
-{
-	return entity->rq != NULL &&
-		entity->rq->sched == sched;
-}
-
-/**
  * drm_sched_entity_is_idle - Check if entity is idle
  *
  * @entity: scheduler entity
@@ -224,7 +209,8 @@ static bool drm_sched_entity_is_idle(struct drm_sched_entity *entity)
 {
 	rmb();
 
-	if (!entity->rq || spsc_queue_peek(&entity->job_queue) == NULL)
+	if (list_empty(&entity->list) ||
+	    spsc_queue_peek(&entity->job_queue) == NULL)
 		return true;
 
 	return false;
@@ -275,11 +261,10 @@ static void drm_sched_entity_kill_jobs_cb(struct dma_fence *f,
 long drm_sched_entity_flush(struct drm_sched_entity *entity, long timeout)
 {
 	struct drm_gpu_scheduler *sched;
+	struct task_struct *last_user;
 	long ret = timeout;
 
 	sched = entity->rq->sched;
-	if (!drm_sched_entity_is_initialized(sched, entity))
-		return ret;
 	/**
 	 * The client will not queue more IBs during this fini, consume existing
 	 * queued IBs or discard them on SIGKILL
@@ -295,8 +280,10 @@ long drm_sched_entity_flush(struct drm_sched_entity *entity, long timeout)
 
 
 	/* For killed process disable any more IBs enqueue right now */
-	if ((current->flags & PF_EXITING) && (current->exit_code == SIGKILL))
-		drm_sched_entity_set_rq(entity, NULL);
+	last_user = cmpxchg(&entity->last_user, current->group_leader, NULL);
+	if ((!last_user || last_user == current->group_leader) &&
+	    (current->flags & PF_EXITING) && (current->exit_code == SIGKILL))
+		drm_sched_rq_remove_entity(entity->rq, entity);
 
 	return ret;
 }
@@ -317,7 +304,7 @@ void drm_sched_entity_fini(struct drm_sched_entity *entity)
 	struct drm_gpu_scheduler *sched;
 
 	sched = entity->rq->sched;
-	drm_sched_entity_set_rq(entity, NULL);
+	drm_sched_rq_remove_entity(entity->rq, entity);
 
 	/* Consumption of existing IBs wasn't completed. Forcefully
 	 * remove them here.
@@ -413,15 +400,12 @@ void drm_sched_entity_set_rq(struct drm_sched_entity *entity,
 	if (entity->rq == rq)
 		return;
 
-	spin_lock(&entity->rq_lock);
-
-	if (entity->rq)
-		drm_sched_rq_remove_entity(entity->rq, entity);
+	BUG_ON(!rq);
 
+	spin_lock(&entity->rq_lock);
+	drm_sched_rq_remove_entity(entity->rq, entity);
 	entity->rq = rq;
-	if (rq)
-		drm_sched_rq_add_entity(rq, entity);
-
+	drm_sched_rq_add_entity(rq, entity);
 	spin_unlock(&entity->rq_lock);
 }
 EXPORT_SYMBOL(drm_sched_entity_set_rq);
@@ -541,6 +525,7 @@ void drm_sched_entity_push_job(struct drm_sched_job *sched_job,
 
 	trace_drm_sched_job(sched_job, entity);
 
+	WRITE_ONCE(entity->last_user, current->group_leader);
 	first = spsc_queue_push(&entity->job_queue, &sched_job->queue_node);
 
 	/* first job wakes up scheduler */
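The entity now records the group leader of the last submitter: drm_sched_entity_push_job() stores it with WRITE_ONCE(), and drm_sched_entity_flush() claims it back with cmpxchg(). Because cmpxchg() returns the previous value and swaps in NULL only on a match, a killed process tears the entity off its run queue only if it was the most recent submitter (or nobody was), so one process's SIGKILL cannot yank an entity another process is still feeding. The handshake, condensed (names as in the patch):

    WRITE_ONCE(entity->last_user, current->group_leader);   /* push side  */

    last_user = cmpxchg(&entity->last_user,                 /* flush side */
                        current->group_leader, NULL);
    if (!last_user || last_user == current->group_leader)
        drm_sched_rq_remove_entity(entity->rq, entity);     /* safe now   */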
diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c
index 6e2d1300b457..f841accc2c00 100644
--- a/drivers/gpu/drm/ttm/ttm_page_alloc.c
+++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c
@@ -47,13 +47,7 @@
 
 #include <drm/ttm/ttm_bo_driver.h>
 #include <drm/ttm/ttm_page_alloc.h>
-
-#if IS_ENABLED(CONFIG_AGP)
-#include <asm/agp.h>
-#endif
-#ifdef CONFIG_X86
-#include <asm/set_memory.h>
-#endif
+#include <drm/ttm/ttm_set_memory.h>
 
 #define NUM_PAGES_TO_ALLOC	(PAGE_SIZE/sizeof(struct page *))
 #define SMALL_ALLOCATION	16
@@ -222,52 +216,6 @@ static struct kobj_type ttm_pool_kobj_type = {
 
 static struct ttm_pool_manager *_manager;
 
-#ifndef CONFIG_X86
-static int set_pages_wb(struct page *page, int numpages)
-{
-#if IS_ENABLED(CONFIG_AGP)
-	int i;
-
-	for (i = 0; i < numpages; i++)
-		unmap_page_from_agp(page++);
-#endif
-	return 0;
-}
-
-static int set_pages_array_wb(struct page **pages, int addrinarray)
-{
-#if IS_ENABLED(CONFIG_AGP)
-	int i;
-
-	for (i = 0; i < addrinarray; i++)
-		unmap_page_from_agp(pages[i]);
-#endif
-	return 0;
-}
-
-static int set_pages_array_wc(struct page **pages, int addrinarray)
-{
-#if IS_ENABLED(CONFIG_AGP)
-	int i;
-
-	for (i = 0; i < addrinarray; i++)
-		map_page_into_agp(pages[i]);
-#endif
-	return 0;
-}
-
-static int set_pages_array_uc(struct page **pages, int addrinarray)
-{
-#if IS_ENABLED(CONFIG_AGP)
-	int i;
-
-	for (i = 0; i < addrinarray; i++)
-		map_page_into_agp(pages[i]);
-#endif
-	return 0;
-}
-#endif
-
 /**
  * Select the right pool or requested caching state and ttm flags. */
 static struct ttm_page_pool *ttm_get_pool(int flags, bool huge,
@@ -302,13 +250,13 @@ static void ttm_pages_put(struct page *pages[], unsigned npages,
 	unsigned int i, pages_nr = (1 << order);
 
 	if (order == 0) {
-		if (set_pages_array_wb(pages, npages))
+		if (ttm_set_pages_array_wb(pages, npages))
 			pr_err("Failed to set %d pages to wb!\n", npages);
 	}
 
 	for (i = 0; i < npages; ++i) {
 		if (order > 0) {
-			if (set_pages_wb(pages[i], pages_nr))
+			if (ttm_set_pages_wb(pages[i], pages_nr))
 				pr_err("Failed to set %d pages to wb!\n", pages_nr);
 		}
 		__free_pages(pages[i], order);
@@ -498,12 +446,12 @@ static int ttm_set_pages_caching(struct page **pages,
 	/* Set page caching */
 	switch (cstate) {
 	case tt_uncached:
-		r = set_pages_array_uc(pages, cpages);
+		r = ttm_set_pages_array_uc(pages, cpages);
 		if (r)
 			pr_err("Failed to set %d pages to uc!\n", cpages);
 		break;
 	case tt_wc:
-		r = set_pages_array_wc(pages, cpages);
+		r = ttm_set_pages_array_wc(pages, cpages);
 		if (r)
 			pr_err("Failed to set %d pages to wc!\n", cpages);
 		break;
diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c b/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
index 3f14c1cc0789..507be7ac1165 100644
--- a/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
+++ b/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
@@ -50,12 +50,7 @@
 #include <linux/kthread.h>
 #include <drm/ttm/ttm_bo_driver.h>
 #include <drm/ttm/ttm_page_alloc.h>
-#if IS_ENABLED(CONFIG_AGP)
-#include <asm/agp.h>
-#endif
-#ifdef CONFIG_X86
-#include <asm/set_memory.h>
-#endif
+#include <drm/ttm/ttm_set_memory.h>
 
 #define NUM_PAGES_TO_ALLOC	(PAGE_SIZE/sizeof(struct page *))
 #define SMALL_ALLOCATION	4
@@ -268,54 +263,19 @@ static struct kobj_type ttm_pool_kobj_type = {
 	.default_attrs = ttm_pool_attrs,
 };
 
-#ifndef CONFIG_X86
-static int set_pages_array_wb(struct page **pages, int addrinarray)
-{
-#if IS_ENABLED(CONFIG_AGP)
-	int i;
-
-	for (i = 0; i < addrinarray; i++)
-		unmap_page_from_agp(pages[i]);
-#endif
-	return 0;
-}
-
-static int set_pages_array_wc(struct page **pages, int addrinarray)
-{
-#if IS_ENABLED(CONFIG_AGP)
-	int i;
-
-	for (i = 0; i < addrinarray; i++)
-		map_page_into_agp(pages[i]);
-#endif
-	return 0;
-}
-
-static int set_pages_array_uc(struct page **pages, int addrinarray)
-{
-#if IS_ENABLED(CONFIG_AGP)
-	int i;
-
-	for (i = 0; i < addrinarray; i++)
-		map_page_into_agp(pages[i]);
-#endif
-	return 0;
-}
-#endif /* for !CONFIG_X86 */
-
 static int ttm_set_pages_caching(struct dma_pool *pool,
 				 struct page **pages, unsigned cpages)
 {
 	int r = 0;
 	/* Set page caching */
 	if (pool->type & IS_UC) {
-		r = set_pages_array_uc(pages, cpages);
+		r = ttm_set_pages_array_uc(pages, cpages);
 		if (r)
 			pr_err("%s: Failed to set %d pages to uc!\n",
 			       pool->dev_name, cpages);
 	}
 	if (pool->type & IS_WC) {
-		r = set_pages_array_wc(pages, cpages);
+		r = ttm_set_pages_array_wc(pages, cpages);
 		if (r)
 			pr_err("%s: Failed to set %d pages to wc!\n",
 			       pool->dev_name, cpages);
@@ -389,17 +349,14 @@ static void ttm_pool_update_free_locked(struct dma_pool *pool,
 static void ttm_dma_page_put(struct dma_pool *pool, struct dma_page *d_page)
 {
 	struct page *page = d_page->p;
-	unsigned i, num_pages;
+	unsigned num_pages;
 
 	/* Don't set WB on WB page pool. */
 	if (!(pool->type & IS_CACHED)) {
 		num_pages = pool->size / PAGE_SIZE;
-		for (i = 0; i < num_pages; ++i, ++page) {
-			if (set_pages_array_wb(&page, 1)) {
-				pr_err("%s: Failed to set %d pages to wb!\n",
-				       pool->dev_name, 1);
-			}
-		}
+		if (ttm_set_pages_wb(page, num_pages))
+			pr_err("%s: Failed to set %d pages to wb!\n",
+			       pool->dev_name, num_pages);
 	}
 
 	list_del(&d_page->page_list);
@@ -420,7 +377,7 @@ static void ttm_dma_pages_put(struct dma_pool *pool, struct list_head *d_pages,
 
 	/* Don't set WB on WB page pool. */
 	if (npages && !(pool->type & IS_CACHED) &&
-	    set_pages_array_wb(pages, npages))
+	    ttm_set_pages_array_wb(pages, npages))
 		pr_err("%s: Failed to set %d pages to wb!\n",
 		       pool->dev_name, npages);
 
diff --git a/drivers/gpu/drm/ttm/ttm_tt.c b/drivers/gpu/drm/ttm/ttm_tt.c
index a1e543972ca7..e3a0691582ff 100644
--- a/drivers/gpu/drm/ttm/ttm_tt.c
+++ b/drivers/gpu/drm/ttm/ttm_tt.c
@@ -38,9 +38,7 @@
 #include <drm/drm_cache.h>
 #include <drm/ttm/ttm_bo_driver.h>
 #include <drm/ttm/ttm_page_alloc.h>
-#ifdef CONFIG_X86
-#include <asm/set_memory.h>
-#endif
+#include <drm/ttm/ttm_set_memory.h>
 
 /**
  * Allocates a ttm structure for the given BO.
@@ -115,10 +113,9 @@ static int ttm_sg_tt_alloc_page_directory(struct ttm_dma_tt *ttm)
 	return 0;
 }
 
-#ifdef CONFIG_X86
-static inline int ttm_tt_set_page_caching(struct page *p,
-					  enum ttm_caching_state c_old,
-					  enum ttm_caching_state c_new)
+static int ttm_tt_set_page_caching(struct page *p,
+				   enum ttm_caching_state c_old,
+				   enum ttm_caching_state c_new)
 {
 	int ret = 0;
 
@@ -129,26 +126,18 @@ static inline int ttm_tt_set_page_caching(struct page *p,
 		/* p isn't in the default caching state, set it to
 		 * writeback first to free its current memtype. */
 
-		ret = set_pages_wb(p, 1);
+		ret = ttm_set_pages_wb(p, 1);
 		if (ret)
 			return ret;
 	}
 
 	if (c_new == tt_wc)
-		ret = set_memory_wc((unsigned long) page_address(p), 1);
+		ret = ttm_set_pages_wc(p, 1);
 	else if (c_new == tt_uncached)
-		ret = set_pages_uc(p, 1);
+		ret = ttm_set_pages_uc(p, 1);
 
 	return ret;
 }
-#else /* CONFIG_X86 */
-static inline int ttm_tt_set_page_caching(struct page *p,
-					  enum ttm_caching_state c_old,
-					  enum ttm_caching_state c_new)
-{
-	return 0;
-}
-#endif /* CONFIG_X86 */
 
 /*
  * Change caching policy for the linear kernel map
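As the retained comment notes, transitions route through write-back first so the page's current memtype is released before the new state is claimed; only the final step differs per target. Condensed control flow (a sketch; the guard is paraphrased from the comment, not copied from the function):

    if (c_old != tt_cached)
        ret = ttm_set_pages_wb(p, 1);   /* uc/wc -> wb: free old memtype */
    if (!ret && c_new == tt_wc)
        ret = ttm_set_pages_wc(p, 1);   /* wb -> wc */
    else if (!ret && c_new == tt_uncached)
        ret = ttm_set_pages_uc(p, 1);   /* wb -> uc */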
diff --git a/include/drm/gpu_scheduler.h b/include/drm/gpu_scheduler.h
index 091b9afcd184..21c648b0b2a1 100644
--- a/include/drm/gpu_scheduler.h
+++ b/include/drm/gpu_scheduler.h
@@ -66,6 +66,7 @@ enum drm_sched_priority {
  * @guilty: points to ctx's guilty.
  * @fini_status: contains the exit status in case the process was signalled.
  * @last_scheduled: points to the finished fence of the last scheduled job.
+ * @last_user: last group leader pushing a job into the entity.
  *
  * Entities will emit jobs in order to their corresponding hardware
  * ring, and the scheduler will alternate between entities based on
@@ -85,6 +86,7 @@ struct drm_sched_entity {
 	struct dma_fence_cb		cb;
 	atomic_t			*guilty;
 	struct dma_fence		*last_scheduled;
+	struct task_struct		*last_user;
 };
 
 /**
diff --git a/include/drm/ttm/ttm_set_memory.h b/include/drm/ttm/ttm_set_memory.h
new file mode 100644
index 000000000000..7c492b49e38c
--- /dev/null
+++ b/include/drm/ttm/ttm_set_memory.h
@@ -0,0 +1,150 @@
+/**************************************************************************
+ *
+ * Copyright (c) 2018 Advanced Micro Devices, Inc.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+/*
+ * Authors: Huang Rui <ray.huang@amd.com>
+ */
+
+#ifndef TTM_SET_MEMORY
+#define TTM_SET_MEMORY
+
+#include <linux/mm.h>
+
+#ifdef CONFIG_X86
+
+#include <asm/set_memory.h>
+
+static inline int ttm_set_pages_array_wb(struct page **pages, int addrinarray)
+{
+	return set_pages_array_wb(pages, addrinarray);
+}
+
+static inline int ttm_set_pages_array_wc(struct page **pages, int addrinarray)
+{
+	return set_pages_array_wc(pages, addrinarray);
+}
+
+static inline int ttm_set_pages_array_uc(struct page **pages, int addrinarray)
+{
+	return set_pages_array_uc(pages, addrinarray);
+}
+
+static inline int ttm_set_pages_wb(struct page *page, int numpages)
+{
+	return set_pages_wb(page, numpages);
+}
+
+static inline int ttm_set_pages_wc(struct page *page, int numpages)
+{
+	unsigned long addr = (unsigned long)page_address(page);
+
+	return set_memory_wc(addr, numpages);
+}
+
+static inline int ttm_set_pages_uc(struct page *page, int numpages)
+{
+	return set_pages_uc(page, numpages);
+}
+
+#else /* for CONFIG_X86 */
+
+#if IS_ENABLED(CONFIG_AGP)
+
+#include <asm/agp.h>
+
+static inline int ttm_set_pages_array_wb(struct page **pages, int addrinarray)
+{
+	int i;
+
+	for (i = 0; i < addrinarray; i++)
+		unmap_page_from_agp(pages[i]);
+	return 0;
+}
+
+static inline int ttm_set_pages_array_wc(struct page **pages, int addrinarray)
+{
+	int i;
+
+	for (i = 0; i < addrinarray; i++)
+		map_page_into_agp(pages[i]);
+	return 0;
+}
+
+static inline int ttm_set_pages_array_uc(struct page **pages, int addrinarray)
+{
+	int i;
+
+	for (i = 0; i < addrinarray; i++)
+		map_page_into_agp(pages[i]);
+	return 0;
+}
+
+static inline int ttm_set_pages_wb(struct page *page, int numpages)
+{
+	int i;
+
+	for (i = 0; i < numpages; i++)
+		unmap_page_from_agp(page++);
+	return 0;
+}
+
+#else /* for CONFIG_AGP */
+
+static inline int ttm_set_pages_array_wb(struct page **pages, int addrinarray)
+{
+	return 0;
+}
+
+static inline int ttm_set_pages_array_wc(struct page **pages, int addrinarray)
+{
+	return 0;
+}
+
+static inline int ttm_set_pages_array_uc(struct page **pages, int addrinarray)
+{
+	return 0;
+}
+
+static inline int ttm_set_pages_wb(struct page *page, int numpages)
+{
+	return 0;
+}
+
+#endif /* for CONFIG_AGP */
+
+static inline int ttm_set_pages_wc(struct page *page, int numpages)
+{
+	return 0;
+}
+
+static inline int ttm_set_pages_uc(struct page *page, int numpages)
+{
+	return 0;
+}
+
+#endif /* for CONFIG_X86 */
+
+#endif
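With the helpers centralized, call sites use the ttm_-prefixed variants unconditionally and the arch policy lives in this one header: real set_memory/set_pages calls on x86, AGP map/unmap where AGP is enabled, and no-ops as the fallback. A caller sketch mirroring the pool code above:

    #include <drm/ttm/ttm_set_memory.h>

    /* Flip an array of pages to write-combined, then back to write-back,
     * with no arch #ifdefs at the call site. */
    if (ttm_set_pages_array_wc(pages, npages))
        pr_err("Failed to set %d pages to wc!\n", npages);
    /* ... use the pages ... */
    if (ttm_set_pages_array_wb(pages, npages))
        pr_err("Failed to set %d pages to wb!\n", npages);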