author    Dave Airlie <airlied@redhat.com>  2019-04-23 21:24:22 -0400
committer Dave Airlie <airlied@redhat.com>  2019-04-23 21:46:19 -0400
commit    42f1a013300dca601d779b02ed6d41f7b2cea362 (patch)
tree      f4cf0204125f32e99cd1bde5e3acf5189eb190fe
parent    6e865c7230140126cbac62e0aef5807108e46e63 (diff)
parent    f55be0be5b7296e73f1634e2839a1953dc12d11e (diff)
Merge branch 'drm-next-5.2' of git://people.freedesktop.org/~agd5f/linux into drm-next
- Add the amdgpu specific bits for timeline support
- Add internal interfaces for xgmi pstate support
- DC Z ordering fixes for planes
- Add support for NV12 planes in DC
- Add colorspace properties for planes in DC
- eDP optimizations if the GOP driver already initialized eDP
- DC bandwidth validation tracing support
Signed-off-by: Dave Airlie <airlied@redhat.com>
From: Alex Deucher <alexdeucher@gmail.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190419150034.3473-1-alexander.deucher@amd.com
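For context on the first bullet: the timeline syncobj wait/signal chunks are consumed from userspace through the existing drm_amdgpu_cs_chunk mechanism. The sketch below is a minimal, hypothetical illustration of how a submitter might fill an AMDGPU_CHUNK_ID_SYNCOBJ_TIMELINE_WAIT chunk; the helper name, the surrounding ioctl setup, and the error handling are assumptions and are not part of this merge.

```c
#include <stdint.h>
#include <string.h>
#include "amdgpu_drm.h"	/* uapi header (shipped with libdrm) defining the CS chunk structs */

/*
 * Hypothetical helper: make a command submission wait for `point` on the
 * timeline syncobj `handle` before it runs. The chunk is later passed to
 * the AMDGPU_CS ioctl alongside the usual IB and buffer-list chunks.
 */
static void fill_timeline_wait_chunk(struct drm_amdgpu_cs_chunk *chunk,
				     struct drm_amdgpu_cs_chunk_syncobj *dep,
				     uint32_t handle, uint64_t point)
{
	memset(dep, 0, sizeof(*dep));
	dep->handle = handle;	/* syncobj handle, e.g. from DRM_IOCTL_SYNCOBJ_CREATE */
	dep->point  = point;	/* timeline point to wait for */
	dep->flags  = 0;

	chunk->chunk_id   = AMDGPU_CHUNK_ID_SYNCOBJ_TIMELINE_WAIT;
	chunk->length_dw  = sizeof(*dep) / 4;	/* chunk lengths are counted in dwords */
	chunk->chunk_data = (uintptr_t)dep;
}
```

Signalling works the same way with AMDGPU_CHUNK_ID_SYNCOBJ_TIMELINE_SIGNAL; as the amdgpu_cs_post_dependencies() hunk below shows, the kernel then attaches the job fence to the requested point with drm_syncobj_add_point() instead of replacing the syncobj's fence.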
27 files changed, 944 insertions(+), 361 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index 6294316f24c7..14398f55f602 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -436,6 +436,12 @@ struct amdgpu_cs_chunk { | |||
436 | void *kdata; | 436 | void *kdata; |
437 | }; | 437 | }; |
438 | 438 | ||
439 | struct amdgpu_cs_post_dep { | ||
440 | struct drm_syncobj *syncobj; | ||
441 | struct dma_fence_chain *chain; | ||
442 | u64 point; | ||
443 | }; | ||
444 | |||
439 | struct amdgpu_cs_parser { | 445 | struct amdgpu_cs_parser { |
440 | struct amdgpu_device *adev; | 446 | struct amdgpu_device *adev; |
441 | struct drm_file *filp; | 447 | struct drm_file *filp; |
@@ -465,8 +471,8 @@ struct amdgpu_cs_parser { | |||
465 | /* user fence */ | 471 | /* user fence */ |
466 | struct amdgpu_bo_list_entry uf_entry; | 472 | struct amdgpu_bo_list_entry uf_entry; |
467 | 473 | ||
468 | unsigned num_post_dep_syncobjs; | 474 | unsigned num_post_deps; |
469 | struct drm_syncobj **post_dep_syncobjs; | 475 | struct amdgpu_cs_post_dep *post_deps; |
470 | }; | 476 | }; |
471 | 477 | ||
472 | static inline u32 amdgpu_get_ib_value(struct amdgpu_cs_parser *p, | 478 | static inline u32 amdgpu_get_ib_value(struct amdgpu_cs_parser *p, |
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index 52a5e4fdc95b..2f6239b6be6f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -215,6 +215,8 @@ static int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, union drm_amdgpu_cs | |||
215 | case AMDGPU_CHUNK_ID_SYNCOBJ_IN: | 215 | case AMDGPU_CHUNK_ID_SYNCOBJ_IN: |
216 | case AMDGPU_CHUNK_ID_SYNCOBJ_OUT: | 216 | case AMDGPU_CHUNK_ID_SYNCOBJ_OUT: |
217 | case AMDGPU_CHUNK_ID_SCHEDULED_DEPENDENCIES: | 217 | case AMDGPU_CHUNK_ID_SCHEDULED_DEPENDENCIES: |
218 | case AMDGPU_CHUNK_ID_SYNCOBJ_TIMELINE_WAIT: | ||
219 | case AMDGPU_CHUNK_ID_SYNCOBJ_TIMELINE_SIGNAL: | ||
218 | break; | 220 | break; |
219 | 221 | ||
220 | default: | 222 | default: |
@@ -804,9 +806,11 @@ static void amdgpu_cs_parser_fini(struct amdgpu_cs_parser *parser, int error, | |||
804 | ttm_eu_backoff_reservation(&parser->ticket, | 806 | ttm_eu_backoff_reservation(&parser->ticket, |
805 | &parser->validated); | 807 | &parser->validated); |
806 | 808 | ||
807 | for (i = 0; i < parser->num_post_dep_syncobjs; i++) | 809 | for (i = 0; i < parser->num_post_deps; i++) { |
808 | drm_syncobj_put(parser->post_dep_syncobjs[i]); | 810 | drm_syncobj_put(parser->post_deps[i].syncobj); |
809 | kfree(parser->post_dep_syncobjs); | 811 | kfree(parser->post_deps[i].chain); |
812 | } | ||
813 | kfree(parser->post_deps); | ||
810 | 814 | ||
811 | dma_fence_put(parser->fence); | 815 | dma_fence_put(parser->fence); |
812 | 816 | ||
@@ -1117,13 +1121,18 @@ static int amdgpu_cs_process_fence_dep(struct amdgpu_cs_parser *p, | |||
1117 | } | 1121 | } |
1118 | 1122 | ||
1119 | static int amdgpu_syncobj_lookup_and_add_to_sync(struct amdgpu_cs_parser *p, | 1123 | static int amdgpu_syncobj_lookup_and_add_to_sync(struct amdgpu_cs_parser *p, |
1120 | uint32_t handle) | 1124 | uint32_t handle, u64 point, |
1125 | u64 flags) | ||
1121 | { | 1126 | { |
1122 | int r; | ||
1123 | struct dma_fence *fence; | 1127 | struct dma_fence *fence; |
1124 | r = drm_syncobj_find_fence(p->filp, handle, 0, 0, &fence); | 1128 | int r; |
1125 | if (r) | 1129 | |
1130 | r = drm_syncobj_find_fence(p->filp, handle, point, flags, &fence); | ||
1131 | if (r) { | ||
1132 | DRM_ERROR("syncobj %u failed to find fence @ %llu (%d)!\n", | ||
1133 | handle, point, r); | ||
1126 | return r; | 1134 | return r; |
1135 | } | ||
1127 | 1136 | ||
1128 | r = amdgpu_sync_fence(p->adev, &p->job->sync, fence, true); | 1137 | r = amdgpu_sync_fence(p->adev, &p->job->sync, fence, true); |
1129 | dma_fence_put(fence); | 1138 | dma_fence_put(fence); |
@@ -1134,46 +1143,118 @@ static int amdgpu_syncobj_lookup_and_add_to_sync(struct amdgpu_cs_parser *p, | |||
1134 | static int amdgpu_cs_process_syncobj_in_dep(struct amdgpu_cs_parser *p, | 1143 | static int amdgpu_cs_process_syncobj_in_dep(struct amdgpu_cs_parser *p, |
1135 | struct amdgpu_cs_chunk *chunk) | 1144 | struct amdgpu_cs_chunk *chunk) |
1136 | { | 1145 | { |
1146 | struct drm_amdgpu_cs_chunk_sem *deps; | ||
1137 | unsigned num_deps; | 1147 | unsigned num_deps; |
1138 | int i, r; | 1148 | int i, r; |
1139 | struct drm_amdgpu_cs_chunk_sem *deps; | ||
1140 | 1149 | ||
1141 | deps = (struct drm_amdgpu_cs_chunk_sem *)chunk->kdata; | 1150 | deps = (struct drm_amdgpu_cs_chunk_sem *)chunk->kdata; |
1142 | num_deps = chunk->length_dw * 4 / | 1151 | num_deps = chunk->length_dw * 4 / |
1143 | sizeof(struct drm_amdgpu_cs_chunk_sem); | 1152 | sizeof(struct drm_amdgpu_cs_chunk_sem); |
1153 | for (i = 0; i < num_deps; ++i) { | ||
1154 | r = amdgpu_syncobj_lookup_and_add_to_sync(p, deps[i].handle, | ||
1155 | 0, 0); | ||
1156 | if (r) | ||
1157 | return r; | ||
1158 | } | ||
1159 | |||
1160 | return 0; | ||
1161 | } | ||
1162 | |||
1144 | 1163 | ||
1164 | static int amdgpu_cs_process_syncobj_timeline_in_dep(struct amdgpu_cs_parser *p, | ||
1165 | struct amdgpu_cs_chunk *chunk) | ||
1166 | { | ||
1167 | struct drm_amdgpu_cs_chunk_syncobj *syncobj_deps; | ||
1168 | unsigned num_deps; | ||
1169 | int i, r; | ||
1170 | |||
1171 | syncobj_deps = (struct drm_amdgpu_cs_chunk_syncobj *)chunk->kdata; | ||
1172 | num_deps = chunk->length_dw * 4 / | ||
1173 | sizeof(struct drm_amdgpu_cs_chunk_syncobj); | ||
1145 | for (i = 0; i < num_deps; ++i) { | 1174 | for (i = 0; i < num_deps; ++i) { |
1146 | r = amdgpu_syncobj_lookup_and_add_to_sync(p, deps[i].handle); | 1175 | r = amdgpu_syncobj_lookup_and_add_to_sync(p, |
1176 | syncobj_deps[i].handle, | ||
1177 | syncobj_deps[i].point, | ||
1178 | syncobj_deps[i].flags); | ||
1147 | if (r) | 1179 | if (r) |
1148 | return r; | 1180 | return r; |
1149 | } | 1181 | } |
1182 | |||
1150 | return 0; | 1183 | return 0; |
1151 | } | 1184 | } |
1152 | 1185 | ||
1153 | static int amdgpu_cs_process_syncobj_out_dep(struct amdgpu_cs_parser *p, | 1186 | static int amdgpu_cs_process_syncobj_out_dep(struct amdgpu_cs_parser *p, |
1154 | struct amdgpu_cs_chunk *chunk) | 1187 | struct amdgpu_cs_chunk *chunk) |
1155 | { | 1188 | { |
1189 | struct drm_amdgpu_cs_chunk_sem *deps; | ||
1156 | unsigned num_deps; | 1190 | unsigned num_deps; |
1157 | int i; | 1191 | int i; |
1158 | struct drm_amdgpu_cs_chunk_sem *deps; | 1192 | |
1159 | deps = (struct drm_amdgpu_cs_chunk_sem *)chunk->kdata; | 1193 | deps = (struct drm_amdgpu_cs_chunk_sem *)chunk->kdata; |
1160 | num_deps = chunk->length_dw * 4 / | 1194 | num_deps = chunk->length_dw * 4 / |
1161 | sizeof(struct drm_amdgpu_cs_chunk_sem); | 1195 | sizeof(struct drm_amdgpu_cs_chunk_sem); |
1162 | 1196 | ||
1163 | p->post_dep_syncobjs = kmalloc_array(num_deps, | 1197 | p->post_deps = kmalloc_array(num_deps, sizeof(*p->post_deps), |
1164 | sizeof(struct drm_syncobj *), | 1198 | GFP_KERNEL); |
1165 | GFP_KERNEL); | 1199 | p->num_post_deps = 0; |
1166 | p->num_post_dep_syncobjs = 0; | ||
1167 | 1200 | ||
1168 | if (!p->post_dep_syncobjs) | 1201 | if (!p->post_deps) |
1169 | return -ENOMEM; | 1202 | return -ENOMEM; |
1170 | 1203 | ||
1204 | |||
1171 | for (i = 0; i < num_deps; ++i) { | 1205 | for (i = 0; i < num_deps; ++i) { |
1172 | p->post_dep_syncobjs[i] = drm_syncobj_find(p->filp, deps[i].handle); | 1206 | p->post_deps[i].syncobj = |
1173 | if (!p->post_dep_syncobjs[i]) | 1207 | drm_syncobj_find(p->filp, deps[i].handle); |
1208 | if (!p->post_deps[i].syncobj) | ||
1174 | return -EINVAL; | 1209 | return -EINVAL; |
1175 | p->num_post_dep_syncobjs++; | 1210 | p->post_deps[i].chain = NULL; |
1211 | p->post_deps[i].point = 0; | ||
1212 | p->num_post_deps++; | ||
1176 | } | 1213 | } |
1214 | |||
1215 | return 0; | ||
1216 | } | ||
1217 | |||
1218 | |||
1219 | static int amdgpu_cs_process_syncobj_timeline_out_dep(struct amdgpu_cs_parser *p, | ||
1220 | struct amdgpu_cs_chunk | ||
1221 | *chunk) | ||
1222 | { | ||
1223 | struct drm_amdgpu_cs_chunk_syncobj *syncobj_deps; | ||
1224 | unsigned num_deps; | ||
1225 | int i; | ||
1226 | |||
1227 | syncobj_deps = (struct drm_amdgpu_cs_chunk_syncobj *)chunk->kdata; | ||
1228 | num_deps = chunk->length_dw * 4 / | ||
1229 | sizeof(struct drm_amdgpu_cs_chunk_syncobj); | ||
1230 | |||
1231 | p->post_deps = kmalloc_array(num_deps, sizeof(*p->post_deps), | ||
1232 | GFP_KERNEL); | ||
1233 | p->num_post_deps = 0; | ||
1234 | |||
1235 | if (!p->post_deps) | ||
1236 | return -ENOMEM; | ||
1237 | |||
1238 | for (i = 0; i < num_deps; ++i) { | ||
1239 | struct amdgpu_cs_post_dep *dep = &p->post_deps[i]; | ||
1240 | |||
1241 | dep->chain = NULL; | ||
1242 | if (syncobj_deps[i].point) { | ||
1243 | dep->chain = kmalloc(sizeof(*dep->chain), GFP_KERNEL); | ||
1244 | if (!dep->chain) | ||
1245 | return -ENOMEM; | ||
1246 | } | ||
1247 | |||
1248 | dep->syncobj = drm_syncobj_find(p->filp, | ||
1249 | syncobj_deps[i].handle); | ||
1250 | if (!dep->syncobj) { | ||
1251 | kfree(dep->chain); | ||
1252 | return -EINVAL; | ||
1253 | } | ||
1254 | dep->point = syncobj_deps[i].point; | ||
1255 | p->num_post_deps++; | ||
1256 | } | ||
1257 | |||
1177 | return 0; | 1258 | return 0; |
1178 | } | 1259 | } |
1179 | 1260 | ||
@@ -1187,19 +1268,33 @@ static int amdgpu_cs_dependencies(struct amdgpu_device *adev, | |||
1187 | 1268 | ||
1188 | chunk = &p->chunks[i]; | 1269 | chunk = &p->chunks[i]; |
1189 | 1270 | ||
1190 | if (chunk->chunk_id == AMDGPU_CHUNK_ID_DEPENDENCIES || | 1271 | switch (chunk->chunk_id) { |
1191 | chunk->chunk_id == AMDGPU_CHUNK_ID_SCHEDULED_DEPENDENCIES) { | 1272 | case AMDGPU_CHUNK_ID_DEPENDENCIES: |
1273 | case AMDGPU_CHUNK_ID_SCHEDULED_DEPENDENCIES: | ||
1192 | r = amdgpu_cs_process_fence_dep(p, chunk); | 1274 | r = amdgpu_cs_process_fence_dep(p, chunk); |
1193 | if (r) | 1275 | if (r) |
1194 | return r; | 1276 | return r; |
1195 | } else if (chunk->chunk_id == AMDGPU_CHUNK_ID_SYNCOBJ_IN) { | 1277 | break; |
1278 | case AMDGPU_CHUNK_ID_SYNCOBJ_IN: | ||
1196 | r = amdgpu_cs_process_syncobj_in_dep(p, chunk); | 1279 | r = amdgpu_cs_process_syncobj_in_dep(p, chunk); |
1197 | if (r) | 1280 | if (r) |
1198 | return r; | 1281 | return r; |
1199 | } else if (chunk->chunk_id == AMDGPU_CHUNK_ID_SYNCOBJ_OUT) { | 1282 | break; |
1283 | case AMDGPU_CHUNK_ID_SYNCOBJ_OUT: | ||
1200 | r = amdgpu_cs_process_syncobj_out_dep(p, chunk); | 1284 | r = amdgpu_cs_process_syncobj_out_dep(p, chunk); |
1201 | if (r) | 1285 | if (r) |
1202 | return r; | 1286 | return r; |
1287 | break; | ||
1288 | case AMDGPU_CHUNK_ID_SYNCOBJ_TIMELINE_WAIT: | ||
1289 | r = amdgpu_cs_process_syncobj_timeline_in_dep(p, chunk); | ||
1290 | if (r) | ||
1291 | return r; | ||
1292 | break; | ||
1293 | case AMDGPU_CHUNK_ID_SYNCOBJ_TIMELINE_SIGNAL: | ||
1294 | r = amdgpu_cs_process_syncobj_timeline_out_dep(p, chunk); | ||
1295 | if (r) | ||
1296 | return r; | ||
1297 | break; | ||
1203 | } | 1298 | } |
1204 | } | 1299 | } |
1205 | 1300 | ||
@@ -1210,8 +1305,17 @@ static void amdgpu_cs_post_dependencies(struct amdgpu_cs_parser *p) | |||
1210 | { | 1305 | { |
1211 | int i; | 1306 | int i; |
1212 | 1307 | ||
1213 | for (i = 0; i < p->num_post_dep_syncobjs; ++i) | 1308 | for (i = 0; i < p->num_post_deps; ++i) { |
1214 | drm_syncobj_replace_fence(p->post_dep_syncobjs[i], p->fence); | 1309 | if (p->post_deps[i].chain && p->post_deps[i].point) { |
1310 | drm_syncobj_add_point(p->post_deps[i].syncobj, | ||
1311 | p->post_deps[i].chain, | ||
1312 | p->fence, p->post_deps[i].point); | ||
1313 | p->post_deps[i].chain = NULL; | ||
1314 | } else { | ||
1315 | drm_syncobj_replace_fence(p->post_deps[i].syncobj, | ||
1316 | p->fence); | ||
1317 | } | ||
1318 | } | ||
1215 | } | 1319 | } |
1216 | 1320 | ||
1217 | static int amdgpu_cs_submit(struct amdgpu_cs_parser *p, | 1321 | static int amdgpu_cs_submit(struct amdgpu_cs_parser *p, |
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index f34e3ab5a9f3..1e2cc9d68a05 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -75,9 +75,10 @@ | |||
75 | * - 3.29.0 - Add AMDGPU_IB_FLAG_RESET_GDS_MAX_WAVE_ID | 75 | * - 3.29.0 - Add AMDGPU_IB_FLAG_RESET_GDS_MAX_WAVE_ID |
76 | * - 3.30.0 - Add AMDGPU_SCHED_OP_CONTEXT_PRIORITY_OVERRIDE. | 76 | * - 3.30.0 - Add AMDGPU_SCHED_OP_CONTEXT_PRIORITY_OVERRIDE. |
77 | * - 3.31.0 - Add support for per-flip tiling attribute changes with DC | 77 | * - 3.31.0 - Add support for per-flip tiling attribute changes with DC |
78 | * - 3.32.0 - Add syncobj timeline support to AMDGPU_CS. | ||
78 | */ | 79 | */ |
79 | #define KMS_DRIVER_MAJOR 3 | 80 | #define KMS_DRIVER_MAJOR 3 |
80 | #define KMS_DRIVER_MINOR 31 | 81 | #define KMS_DRIVER_MINOR 32 |
81 | #define KMS_DRIVER_PATCHLEVEL 0 | 82 | #define KMS_DRIVER_PATCHLEVEL 0 |
82 | 83 | ||
83 | int amdgpu_vram_limit = 0; | 84 | int amdgpu_vram_limit = 0; |
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
index ee47c11e92ce..4dee2326b29c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
@@ -136,8 +136,9 @@ int amdgpu_fence_emit(struct amdgpu_ring *ring, struct dma_fence **f, | |||
136 | { | 136 | { |
137 | struct amdgpu_device *adev = ring->adev; | 137 | struct amdgpu_device *adev = ring->adev; |
138 | struct amdgpu_fence *fence; | 138 | struct amdgpu_fence *fence; |
139 | struct dma_fence *old, **ptr; | 139 | struct dma_fence __rcu **ptr; |
140 | uint32_t seq; | 140 | uint32_t seq; |
141 | int r; | ||
141 | 142 | ||
142 | fence = kmem_cache_alloc(amdgpu_fence_slab, GFP_KERNEL); | 143 | fence = kmem_cache_alloc(amdgpu_fence_slab, GFP_KERNEL); |
143 | if (fence == NULL) | 144 | if (fence == NULL) |
@@ -153,15 +154,24 @@ int amdgpu_fence_emit(struct amdgpu_ring *ring, struct dma_fence **f, | |||
153 | seq, flags | AMDGPU_FENCE_FLAG_INT); | 154 | seq, flags | AMDGPU_FENCE_FLAG_INT); |
154 | 155 | ||
155 | ptr = &ring->fence_drv.fences[seq & ring->fence_drv.num_fences_mask]; | 156 | ptr = &ring->fence_drv.fences[seq & ring->fence_drv.num_fences_mask]; |
157 | if (unlikely(rcu_dereference_protected(*ptr, 1))) { | ||
158 | struct dma_fence *old; | ||
159 | |||
160 | rcu_read_lock(); | ||
161 | old = dma_fence_get_rcu_safe(ptr); | ||
162 | rcu_read_unlock(); | ||
163 | |||
164 | if (old) { | ||
165 | r = dma_fence_wait(old, false); | ||
166 | dma_fence_put(old); | ||
167 | if (r) | ||
168 | return r; | ||
169 | } | ||
170 | } | ||
171 | |||
156 | /* This function can't be called concurrently anyway, otherwise | 172 | /* This function can't be called concurrently anyway, otherwise |
157 | * emitting the fence would mess up the hardware ring buffer. | 173 | * emitting the fence would mess up the hardware ring buffer. |
158 | */ | 174 | */ |
159 | old = rcu_dereference_protected(*ptr, 1); | ||
160 | if (old && !dma_fence_is_signaled(old)) { | ||
161 | DRM_INFO("rcu slot is busy\n"); | ||
162 | dma_fence_wait(old, false); | ||
163 | } | ||
164 | |||
165 | rcu_assign_pointer(*ptr, dma_fence_get(&fence->base)); | 175 | rcu_assign_pointer(*ptr, dma_fence_get(&fence->base)); |
166 | 176 | ||
167 | *f = &fence->base; | 177 | *f = &fence->base; |
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c
index 336834797af3..a48c84c51775 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c
@@ -24,6 +24,7 @@ | |||
24 | #include <linux/list.h> | 24 | #include <linux/list.h> |
25 | #include "amdgpu.h" | 25 | #include "amdgpu.h" |
26 | #include "amdgpu_xgmi.h" | 26 | #include "amdgpu_xgmi.h" |
27 | #include "amdgpu_smu.h" | ||
27 | 28 | ||
28 | 29 | ||
29 | static DEFINE_MUTEX(xgmi_mutex); | 30 | static DEFINE_MUTEX(xgmi_mutex); |
@@ -216,7 +217,17 @@ int amdgpu_xgmi_set_pstate(struct amdgpu_device *adev, int pstate) | |||
216 | 217 | ||
217 | if (hive->pstate == pstate) | 218 | if (hive->pstate == pstate) |
218 | return 0; | 219 | return 0; |
219 | /* Todo : sent the message to SMU for pstate change */ | 220 | |
221 | dev_dbg(adev->dev, "Set xgmi pstate %d.\n", pstate); | ||
222 | |||
223 | if (is_support_sw_smu(adev)) | ||
224 | ret = smu_set_xgmi_pstate(&adev->smu, pstate); | ||
225 | if (ret) | ||
226 | dev_err(adev->dev, | ||
227 | "XGMI: Set pstate failure on device %llx, hive %llx, ret %d", | ||
228 | adev->gmc.xgmi.node_id, | ||
229 | adev->gmc.xgmi.hive_id, ret); | ||
230 | |||
220 | return ret; | 231 | return ret; |
221 | } | 232 | } |
222 | 233 | ||
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 9f7d399b73d4..923cb5ca5a57 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -2085,7 +2085,7 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev) | |||
2085 | if (!plane->blends_with_above || !plane->blends_with_below) | 2085 | if (!plane->blends_with_above || !plane->blends_with_below) |
2086 | continue; | 2086 | continue; |
2087 | 2087 | ||
2088 | if (!plane->supports_argb8888) | 2088 | if (!plane->pixel_format_support.argb8888) |
2089 | continue; | 2089 | continue; |
2090 | 2090 | ||
2091 | if (initialize_plane(dm, NULL, primary_planes + i, | 2091 | if (initialize_plane(dm, NULL, primary_planes + i, |
@@ -2385,56 +2385,63 @@ static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs = { | |||
2385 | .destroy = amdgpu_dm_encoder_destroy, | 2385 | .destroy = amdgpu_dm_encoder_destroy, |
2386 | }; | 2386 | }; |
2387 | 2387 | ||
2388 | static bool fill_rects_from_plane_state(const struct drm_plane_state *state, | 2388 | |
2389 | struct dc_plane_state *plane_state) | 2389 | static int fill_dc_scaling_info(const struct drm_plane_state *state, |
2390 | struct dc_scaling_info *scaling_info) | ||
2390 | { | 2391 | { |
2391 | plane_state->src_rect.x = state->src_x >> 16; | 2392 | int scale_w, scale_h; |
2392 | plane_state->src_rect.y = state->src_y >> 16; | ||
2393 | /* we ignore the mantissa for now and do not deal with floating pixels :( */ | ||
2394 | plane_state->src_rect.width = state->src_w >> 16; | ||
2395 | 2393 | ||
2396 | if (plane_state->src_rect.width == 0) | 2394 | memset(scaling_info, 0, sizeof(*scaling_info)); |
2397 | return false; | ||
2398 | 2395 | ||
2399 | plane_state->src_rect.height = state->src_h >> 16; | 2396 | /* Source is fixed 16.16 but we ignore mantissa for now... */ |
2400 | if (plane_state->src_rect.height == 0) | 2397 | scaling_info->src_rect.x = state->src_x >> 16; |
2401 | return false; | 2398 | scaling_info->src_rect.y = state->src_y >> 16; |
2402 | 2399 | ||
2403 | plane_state->dst_rect.x = state->crtc_x; | 2400 | scaling_info->src_rect.width = state->src_w >> 16; |
2404 | plane_state->dst_rect.y = state->crtc_y; | 2401 | if (scaling_info->src_rect.width == 0) |
2402 | return -EINVAL; | ||
2403 | |||
2404 | scaling_info->src_rect.height = state->src_h >> 16; | ||
2405 | if (scaling_info->src_rect.height == 0) | ||
2406 | return -EINVAL; | ||
2407 | |||
2408 | scaling_info->dst_rect.x = state->crtc_x; | ||
2409 | scaling_info->dst_rect.y = state->crtc_y; | ||
2405 | 2410 | ||
2406 | if (state->crtc_w == 0) | 2411 | if (state->crtc_w == 0) |
2407 | return false; | 2412 | return -EINVAL; |
2408 | 2413 | ||
2409 | plane_state->dst_rect.width = state->crtc_w; | 2414 | scaling_info->dst_rect.width = state->crtc_w; |
2410 | 2415 | ||
2411 | if (state->crtc_h == 0) | 2416 | if (state->crtc_h == 0) |
2412 | return false; | 2417 | return -EINVAL; |
2413 | 2418 | ||
2414 | plane_state->dst_rect.height = state->crtc_h; | 2419 | scaling_info->dst_rect.height = state->crtc_h; |
2415 | 2420 | ||
2416 | plane_state->clip_rect = plane_state->dst_rect; | 2421 | /* DRM doesn't specify clipping on destination output. */ |
2422 | scaling_info->clip_rect = scaling_info->dst_rect; | ||
2417 | 2423 | ||
2418 | switch (state->rotation & DRM_MODE_ROTATE_MASK) { | 2424 | /* TODO: Validate scaling per-format with DC plane caps */ |
2419 | case DRM_MODE_ROTATE_0: | 2425 | scale_w = scaling_info->dst_rect.width * 1000 / |
2420 | plane_state->rotation = ROTATION_ANGLE_0; | 2426 | scaling_info->src_rect.width; |
2421 | break; | ||
2422 | case DRM_MODE_ROTATE_90: | ||
2423 | plane_state->rotation = ROTATION_ANGLE_90; | ||
2424 | break; | ||
2425 | case DRM_MODE_ROTATE_180: | ||
2426 | plane_state->rotation = ROTATION_ANGLE_180; | ||
2427 | break; | ||
2428 | case DRM_MODE_ROTATE_270: | ||
2429 | plane_state->rotation = ROTATION_ANGLE_270; | ||
2430 | break; | ||
2431 | default: | ||
2432 | plane_state->rotation = ROTATION_ANGLE_0; | ||
2433 | break; | ||
2434 | } | ||
2435 | 2427 | ||
2436 | return true; | 2428 | if (scale_w < 250 || scale_w > 16000) |
2429 | return -EINVAL; | ||
2430 | |||
2431 | scale_h = scaling_info->dst_rect.height * 1000 / | ||
2432 | scaling_info->src_rect.height; | ||
2433 | |||
2434 | if (scale_h < 250 || scale_h > 16000) | ||
2435 | return -EINVAL; | ||
2436 | |||
2437 | /* | ||
2438 | * The "scaling_quality" can be ignored for now, quality = 0 has DC | ||
2439 | * assume reasonable defaults based on the format. | ||
2440 | */ | ||
2441 | |||
2442 | return 0; | ||
2437 | } | 2443 | } |
2444 | |||
2438 | static int get_fb_info(const struct amdgpu_framebuffer *amdgpu_fb, | 2445 | static int get_fb_info(const struct amdgpu_framebuffer *amdgpu_fb, |
2439 | uint64_t *tiling_flags) | 2446 | uint64_t *tiling_flags) |
2440 | { | 2447 | { |
@@ -2463,12 +2470,16 @@ static inline uint64_t get_dcc_address(uint64_t address, uint64_t tiling_flags) | |||
2463 | return offset ? (address + offset * 256) : 0; | 2470 | return offset ? (address + offset * 256) : 0; |
2464 | } | 2471 | } |
2465 | 2472 | ||
2466 | static int fill_plane_dcc_attributes(struct amdgpu_device *adev, | 2473 | static int |
2467 | const struct amdgpu_framebuffer *afb, | 2474 | fill_plane_dcc_attributes(struct amdgpu_device *adev, |
2468 | const struct dc_plane_state *plane_state, | 2475 | const struct amdgpu_framebuffer *afb, |
2469 | struct dc_plane_dcc_param *dcc, | 2476 | const enum surface_pixel_format format, |
2470 | struct dc_plane_address *address, | 2477 | const enum dc_rotation_angle rotation, |
2471 | uint64_t info) | 2478 | const union plane_size *plane_size, |
2479 | const union dc_tiling_info *tiling_info, | ||
2480 | const uint64_t info, | ||
2481 | struct dc_plane_dcc_param *dcc, | ||
2482 | struct dc_plane_address *address) | ||
2472 | { | 2483 | { |
2473 | struct dc *dc = adev->dm.dc; | 2484 | struct dc *dc = adev->dm.dc; |
2474 | struct dc_dcc_surface_param input; | 2485 | struct dc_dcc_surface_param input; |
@@ -2483,24 +2494,20 @@ static int fill_plane_dcc_attributes(struct amdgpu_device *adev, | |||
2483 | if (!offset) | 2494 | if (!offset) |
2484 | return 0; | 2495 | return 0; |
2485 | 2496 | ||
2486 | if (plane_state->address.type != PLN_ADDR_TYPE_GRAPHICS) | 2497 | if (format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) |
2487 | return 0; | 2498 | return 0; |
2488 | 2499 | ||
2489 | if (!dc->cap_funcs.get_dcc_compression_cap) | 2500 | if (!dc->cap_funcs.get_dcc_compression_cap) |
2490 | return -EINVAL; | 2501 | return -EINVAL; |
2491 | 2502 | ||
2492 | input.format = plane_state->format; | 2503 | input.format = format; |
2493 | input.surface_size.width = | 2504 | input.surface_size.width = plane_size->grph.surface_size.width; |
2494 | plane_state->plane_size.grph.surface_size.width; | 2505 | input.surface_size.height = plane_size->grph.surface_size.height; |
2495 | input.surface_size.height = | 2506 | input.swizzle_mode = tiling_info->gfx9.swizzle; |
2496 | plane_state->plane_size.grph.surface_size.height; | ||
2497 | input.swizzle_mode = plane_state->tiling_info.gfx9.swizzle; | ||
2498 | 2507 | ||
2499 | if (plane_state->rotation == ROTATION_ANGLE_0 || | 2508 | if (rotation == ROTATION_ANGLE_0 || rotation == ROTATION_ANGLE_180) |
2500 | plane_state->rotation == ROTATION_ANGLE_180) | ||
2501 | input.scan = SCAN_DIRECTION_HORIZONTAL; | 2509 | input.scan = SCAN_DIRECTION_HORIZONTAL; |
2502 | else if (plane_state->rotation == ROTATION_ANGLE_90 || | 2510 | else if (rotation == ROTATION_ANGLE_90 || rotation == ROTATION_ANGLE_270) |
2503 | plane_state->rotation == ROTATION_ANGLE_270) | ||
2504 | input.scan = SCAN_DIRECTION_VERTICAL; | 2511 | input.scan = SCAN_DIRECTION_VERTICAL; |
2505 | 2512 | ||
2506 | if (!dc->cap_funcs.get_dcc_compression_cap(dc, &input, &output)) | 2513 | if (!dc->cap_funcs.get_dcc_compression_cap(dc, &input, &output)) |
@@ -2525,28 +2532,54 @@ static int fill_plane_dcc_attributes(struct amdgpu_device *adev, | |||
2525 | } | 2532 | } |
2526 | 2533 | ||
2527 | static int | 2534 | static int |
2528 | fill_plane_tiling_attributes(struct amdgpu_device *adev, | 2535 | fill_plane_buffer_attributes(struct amdgpu_device *adev, |
2529 | const struct amdgpu_framebuffer *afb, | 2536 | const struct amdgpu_framebuffer *afb, |
2530 | const struct dc_plane_state *plane_state, | 2537 | const enum surface_pixel_format format, |
2538 | const enum dc_rotation_angle rotation, | ||
2539 | const uint64_t tiling_flags, | ||
2531 | union dc_tiling_info *tiling_info, | 2540 | union dc_tiling_info *tiling_info, |
2541 | union plane_size *plane_size, | ||
2532 | struct dc_plane_dcc_param *dcc, | 2542 | struct dc_plane_dcc_param *dcc, |
2533 | struct dc_plane_address *address, | 2543 | struct dc_plane_address *address) |
2534 | uint64_t tiling_flags) | ||
2535 | { | 2544 | { |
2545 | const struct drm_framebuffer *fb = &afb->base; | ||
2536 | int ret; | 2546 | int ret; |
2537 | 2547 | ||
2538 | memset(tiling_info, 0, sizeof(*tiling_info)); | 2548 | memset(tiling_info, 0, sizeof(*tiling_info)); |
2549 | memset(plane_size, 0, sizeof(*plane_size)); | ||
2539 | memset(dcc, 0, sizeof(*dcc)); | 2550 | memset(dcc, 0, sizeof(*dcc)); |
2540 | memset(address, 0, sizeof(*address)); | 2551 | memset(address, 0, sizeof(*address)); |
2541 | 2552 | ||
2542 | if (plane_state->format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) { | 2553 | if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) { |
2554 | plane_size->grph.surface_size.x = 0; | ||
2555 | plane_size->grph.surface_size.y = 0; | ||
2556 | plane_size->grph.surface_size.width = fb->width; | ||
2557 | plane_size->grph.surface_size.height = fb->height; | ||
2558 | plane_size->grph.surface_pitch = | ||
2559 | fb->pitches[0] / fb->format->cpp[0]; | ||
2560 | |||
2543 | address->type = PLN_ADDR_TYPE_GRAPHICS; | 2561 | address->type = PLN_ADDR_TYPE_GRAPHICS; |
2544 | address->grph.addr.low_part = lower_32_bits(afb->address); | 2562 | address->grph.addr.low_part = lower_32_bits(afb->address); |
2545 | address->grph.addr.high_part = upper_32_bits(afb->address); | 2563 | address->grph.addr.high_part = upper_32_bits(afb->address); |
2546 | } else { | 2564 | } else { |
2547 | const struct drm_framebuffer *fb = &afb->base; | ||
2548 | uint64_t chroma_addr = afb->address + fb->offsets[1]; | 2565 | uint64_t chroma_addr = afb->address + fb->offsets[1]; |
2549 | 2566 | ||
2567 | plane_size->video.luma_size.x = 0; | ||
2568 | plane_size->video.luma_size.y = 0; | ||
2569 | plane_size->video.luma_size.width = fb->width; | ||
2570 | plane_size->video.luma_size.height = fb->height; | ||
2571 | plane_size->video.luma_pitch = | ||
2572 | fb->pitches[0] / fb->format->cpp[0]; | ||
2573 | |||
2574 | plane_size->video.chroma_size.x = 0; | ||
2575 | plane_size->video.chroma_size.y = 0; | ||
2576 | /* TODO: set these based on surface format */ | ||
2577 | plane_size->video.chroma_size.width = fb->width / 2; | ||
2578 | plane_size->video.chroma_size.height = fb->height / 2; | ||
2579 | |||
2580 | plane_size->video.chroma_pitch = | ||
2581 | fb->pitches[1] / fb->format->cpp[1]; | ||
2582 | |||
2550 | address->type = PLN_ADDR_TYPE_VIDEO_PROGRESSIVE; | 2583 | address->type = PLN_ADDR_TYPE_VIDEO_PROGRESSIVE; |
2551 | address->video_progressive.luma_addr.low_part = | 2584 | address->video_progressive.luma_addr.low_part = |
2552 | lower_32_bits(afb->address); | 2585 | lower_32_bits(afb->address); |
@@ -2607,8 +2640,9 @@ fill_plane_tiling_attributes(struct amdgpu_device *adev, | |||
2607 | AMDGPU_TILING_GET(tiling_flags, SWIZZLE_MODE); | 2640 | AMDGPU_TILING_GET(tiling_flags, SWIZZLE_MODE); |
2608 | tiling_info->gfx9.shaderEnable = 1; | 2641 | tiling_info->gfx9.shaderEnable = 1; |
2609 | 2642 | ||
2610 | ret = fill_plane_dcc_attributes(adev, afb, plane_state, dcc, | 2643 | ret = fill_plane_dcc_attributes(adev, afb, format, rotation, |
2611 | address, tiling_flags); | 2644 | plane_size, tiling_info, |
2645 | tiling_flags, dcc, address); | ||
2612 | if (ret) | 2646 | if (ret) |
2613 | return ret; | 2647 | return ret; |
2614 | } | 2648 | } |
@@ -2616,112 +2650,8 @@ fill_plane_tiling_attributes(struct amdgpu_device *adev, | |||
2616 | return 0; | 2650 | return 0; |
2617 | } | 2651 | } |
2618 | 2652 | ||
2619 | static int fill_plane_attributes_from_fb(struct amdgpu_device *adev, | ||
2620 | struct dc_plane_state *plane_state, | ||
2621 | const struct amdgpu_framebuffer *amdgpu_fb) | ||
2622 | { | ||
2623 | uint64_t tiling_flags; | ||
2624 | const struct drm_framebuffer *fb = &amdgpu_fb->base; | ||
2625 | int ret = 0; | ||
2626 | struct drm_format_name_buf format_name; | ||
2627 | |||
2628 | ret = get_fb_info( | ||
2629 | amdgpu_fb, | ||
2630 | &tiling_flags); | ||
2631 | |||
2632 | if (ret) | ||
2633 | return ret; | ||
2634 | |||
2635 | switch (fb->format->format) { | ||
2636 | case DRM_FORMAT_C8: | ||
2637 | plane_state->format = SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS; | ||
2638 | break; | ||
2639 | case DRM_FORMAT_RGB565: | ||
2640 | plane_state->format = SURFACE_PIXEL_FORMAT_GRPH_RGB565; | ||
2641 | break; | ||
2642 | case DRM_FORMAT_XRGB8888: | ||
2643 | case DRM_FORMAT_ARGB8888: | ||
2644 | plane_state->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB8888; | ||
2645 | break; | ||
2646 | case DRM_FORMAT_XRGB2101010: | ||
2647 | case DRM_FORMAT_ARGB2101010: | ||
2648 | plane_state->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010; | ||
2649 | break; | ||
2650 | case DRM_FORMAT_XBGR2101010: | ||
2651 | case DRM_FORMAT_ABGR2101010: | ||
2652 | plane_state->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010; | ||
2653 | break; | ||
2654 | case DRM_FORMAT_XBGR8888: | ||
2655 | case DRM_FORMAT_ABGR8888: | ||
2656 | plane_state->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR8888; | ||
2657 | break; | ||
2658 | case DRM_FORMAT_NV21: | ||
2659 | plane_state->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr; | ||
2660 | break; | ||
2661 | case DRM_FORMAT_NV12: | ||
2662 | plane_state->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb; | ||
2663 | break; | ||
2664 | default: | ||
2665 | DRM_ERROR("Unsupported screen format %s\n", | ||
2666 | drm_get_format_name(fb->format->format, &format_name)); | ||
2667 | return -EINVAL; | ||
2668 | } | ||
2669 | |||
2670 | memset(&plane_state->address, 0, sizeof(plane_state->address)); | ||
2671 | |||
2672 | if (plane_state->format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) { | ||
2673 | plane_state->plane_size.grph.surface_size.x = 0; | ||
2674 | plane_state->plane_size.grph.surface_size.y = 0; | ||
2675 | plane_state->plane_size.grph.surface_size.width = fb->width; | ||
2676 | plane_state->plane_size.grph.surface_size.height = fb->height; | ||
2677 | plane_state->plane_size.grph.surface_pitch = | ||
2678 | fb->pitches[0] / fb->format->cpp[0]; | ||
2679 | /* TODO: unhardcode */ | ||
2680 | plane_state->color_space = COLOR_SPACE_SRGB; | ||
2681 | |||
2682 | } else { | ||
2683 | plane_state->plane_size.video.luma_size.x = 0; | ||
2684 | plane_state->plane_size.video.luma_size.y = 0; | ||
2685 | plane_state->plane_size.video.luma_size.width = fb->width; | ||
2686 | plane_state->plane_size.video.luma_size.height = fb->height; | ||
2687 | plane_state->plane_size.video.luma_pitch = | ||
2688 | fb->pitches[0] / fb->format->cpp[0]; | ||
2689 | |||
2690 | plane_state->plane_size.video.chroma_size.x = 0; | ||
2691 | plane_state->plane_size.video.chroma_size.y = 0; | ||
2692 | /* TODO: set these based on surface format */ | ||
2693 | plane_state->plane_size.video.chroma_size.width = fb->width / 2; | ||
2694 | plane_state->plane_size.video.chroma_size.height = fb->height / 2; | ||
2695 | |||
2696 | plane_state->plane_size.video.chroma_pitch = | ||
2697 | fb->pitches[1] / fb->format->cpp[1]; | ||
2698 | |||
2699 | /* TODO: unhardcode */ | ||
2700 | plane_state->color_space = COLOR_SPACE_YCBCR709; | ||
2701 | } | ||
2702 | |||
2703 | fill_plane_tiling_attributes(adev, amdgpu_fb, plane_state, | ||
2704 | &plane_state->tiling_info, | ||
2705 | &plane_state->dcc, | ||
2706 | &plane_state->address, | ||
2707 | tiling_flags); | ||
2708 | |||
2709 | plane_state->visible = true; | ||
2710 | plane_state->scaling_quality.h_taps_c = 0; | ||
2711 | plane_state->scaling_quality.v_taps_c = 0; | ||
2712 | |||
2713 | /* is this needed? is plane_state zeroed at allocation? */ | ||
2714 | plane_state->scaling_quality.h_taps = 0; | ||
2715 | plane_state->scaling_quality.v_taps = 0; | ||
2716 | plane_state->stereo_format = PLANE_STEREO_FORMAT_NONE; | ||
2717 | |||
2718 | return ret; | ||
2719 | |||
2720 | } | ||
2721 | |||
2722 | static void | 2653 | static void |
2723 | fill_blending_from_plane_state(struct drm_plane_state *plane_state, | 2654 | fill_blending_from_plane_state(const struct drm_plane_state *plane_state, |
2724 | const struct dc_plane_state *dc_plane_state, | ||
2725 | bool *per_pixel_alpha, bool *global_alpha, | 2655 | bool *per_pixel_alpha, bool *global_alpha, |
2726 | int *global_alpha_value) | 2656 | int *global_alpha_value) |
2727 | { | 2657 | { |
@@ -2757,7 +2687,7 @@ fill_blending_from_plane_state(struct drm_plane_state *plane_state, | |||
2757 | 2687 | ||
2758 | static int | 2688 | static int |
2759 | fill_plane_color_attributes(const struct drm_plane_state *plane_state, | 2689 | fill_plane_color_attributes(const struct drm_plane_state *plane_state, |
2760 | const struct dc_plane_state *dc_plane_state, | 2690 | const enum surface_pixel_format format, |
2761 | enum dc_color_space *color_space) | 2691 | enum dc_color_space *color_space) |
2762 | { | 2692 | { |
2763 | bool full_range; | 2693 | bool full_range; |
@@ -2765,7 +2695,7 @@ fill_plane_color_attributes(const struct drm_plane_state *plane_state, | |||
2765 | *color_space = COLOR_SPACE_SRGB; | 2695 | *color_space = COLOR_SPACE_SRGB; |
2766 | 2696 | ||
2767 | /* DRM color properties only affect non-RGB formats. */ | 2697 | /* DRM color properties only affect non-RGB formats. */ |
2768 | if (dc_plane_state->format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) | 2698 | if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) |
2769 | return 0; | 2699 | return 0; |
2770 | 2700 | ||
2771 | full_range = (plane_state->color_range == DRM_COLOR_YCBCR_FULL_RANGE); | 2701 | full_range = (plane_state->color_range == DRM_COLOR_YCBCR_FULL_RANGE); |
@@ -2799,32 +2729,144 @@ fill_plane_color_attributes(const struct drm_plane_state *plane_state, | |||
2799 | return 0; | 2729 | return 0; |
2800 | } | 2730 | } |
2801 | 2731 | ||
2802 | static int fill_plane_attributes(struct amdgpu_device *adev, | 2732 | static int |
2803 | struct dc_plane_state *dc_plane_state, | 2733 | fill_dc_plane_info_and_addr(struct amdgpu_device *adev, |
2804 | struct drm_plane_state *plane_state, | 2734 | const struct drm_plane_state *plane_state, |
2805 | struct drm_crtc_state *crtc_state) | 2735 | const uint64_t tiling_flags, |
2736 | struct dc_plane_info *plane_info, | ||
2737 | struct dc_plane_address *address) | ||
2738 | { | ||
2739 | const struct drm_framebuffer *fb = plane_state->fb; | ||
2740 | const struct amdgpu_framebuffer *afb = | ||
2741 | to_amdgpu_framebuffer(plane_state->fb); | ||
2742 | struct drm_format_name_buf format_name; | ||
2743 | int ret; | ||
2744 | |||
2745 | memset(plane_info, 0, sizeof(*plane_info)); | ||
2746 | |||
2747 | switch (fb->format->format) { | ||
2748 | case DRM_FORMAT_C8: | ||
2749 | plane_info->format = | ||
2750 | SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS; | ||
2751 | break; | ||
2752 | case DRM_FORMAT_RGB565: | ||
2753 | plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_RGB565; | ||
2754 | break; | ||
2755 | case DRM_FORMAT_XRGB8888: | ||
2756 | case DRM_FORMAT_ARGB8888: | ||
2757 | plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB8888; | ||
2758 | break; | ||
2759 | case DRM_FORMAT_XRGB2101010: | ||
2760 | case DRM_FORMAT_ARGB2101010: | ||
2761 | plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010; | ||
2762 | break; | ||
2763 | case DRM_FORMAT_XBGR2101010: | ||
2764 | case DRM_FORMAT_ABGR2101010: | ||
2765 | plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010; | ||
2766 | break; | ||
2767 | case DRM_FORMAT_XBGR8888: | ||
2768 | case DRM_FORMAT_ABGR8888: | ||
2769 | plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR8888; | ||
2770 | break; | ||
2771 | case DRM_FORMAT_NV21: | ||
2772 | plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr; | ||
2773 | break; | ||
2774 | case DRM_FORMAT_NV12: | ||
2775 | plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb; | ||
2776 | break; | ||
2777 | default: | ||
2778 | DRM_ERROR( | ||
2779 | "Unsupported screen format %s\n", | ||
2780 | drm_get_format_name(fb->format->format, &format_name)); | ||
2781 | return -EINVAL; | ||
2782 | } | ||
2783 | |||
2784 | switch (plane_state->rotation & DRM_MODE_ROTATE_MASK) { | ||
2785 | case DRM_MODE_ROTATE_0: | ||
2786 | plane_info->rotation = ROTATION_ANGLE_0; | ||
2787 | break; | ||
2788 | case DRM_MODE_ROTATE_90: | ||
2789 | plane_info->rotation = ROTATION_ANGLE_90; | ||
2790 | break; | ||
2791 | case DRM_MODE_ROTATE_180: | ||
2792 | plane_info->rotation = ROTATION_ANGLE_180; | ||
2793 | break; | ||
2794 | case DRM_MODE_ROTATE_270: | ||
2795 | plane_info->rotation = ROTATION_ANGLE_270; | ||
2796 | break; | ||
2797 | default: | ||
2798 | plane_info->rotation = ROTATION_ANGLE_0; | ||
2799 | break; | ||
2800 | } | ||
2801 | |||
2802 | plane_info->visible = true; | ||
2803 | plane_info->stereo_format = PLANE_STEREO_FORMAT_NONE; | ||
2804 | |||
2805 | ret = fill_plane_color_attributes(plane_state, plane_info->format, | ||
2806 | &plane_info->color_space); | ||
2807 | if (ret) | ||
2808 | return ret; | ||
2809 | |||
2810 | ret = fill_plane_buffer_attributes(adev, afb, plane_info->format, | ||
2811 | plane_info->rotation, tiling_flags, | ||
2812 | &plane_info->tiling_info, | ||
2813 | &plane_info->plane_size, | ||
2814 | &plane_info->dcc, address); | ||
2815 | if (ret) | ||
2816 | return ret; | ||
2817 | |||
2818 | fill_blending_from_plane_state( | ||
2819 | plane_state, &plane_info->per_pixel_alpha, | ||
2820 | &plane_info->global_alpha, &plane_info->global_alpha_value); | ||
2821 | |||
2822 | return 0; | ||
2823 | } | ||
2824 | |||
2825 | static int fill_dc_plane_attributes(struct amdgpu_device *adev, | ||
2826 | struct dc_plane_state *dc_plane_state, | ||
2827 | struct drm_plane_state *plane_state, | ||
2828 | struct drm_crtc_state *crtc_state) | ||
2806 | { | 2829 | { |
2807 | const struct amdgpu_framebuffer *amdgpu_fb = | 2830 | const struct amdgpu_framebuffer *amdgpu_fb = |
2808 | to_amdgpu_framebuffer(plane_state->fb); | 2831 | to_amdgpu_framebuffer(plane_state->fb); |
2809 | const struct drm_crtc *crtc = plane_state->crtc; | 2832 | struct dc_scaling_info scaling_info; |
2810 | int ret = 0; | 2833 | struct dc_plane_info plane_info; |
2834 | uint64_t tiling_flags; | ||
2835 | int ret; | ||
2811 | 2836 | ||
2812 | if (!fill_rects_from_plane_state(plane_state, dc_plane_state)) | 2837 | ret = fill_dc_scaling_info(plane_state, &scaling_info); |
2813 | return -EINVAL; | 2838 | if (ret) |
2839 | return ret; | ||
2814 | 2840 | ||
2815 | ret = fill_plane_attributes_from_fb( | 2841 | dc_plane_state->src_rect = scaling_info.src_rect; |
2816 | crtc->dev->dev_private, | 2842 | dc_plane_state->dst_rect = scaling_info.dst_rect; |
2817 | dc_plane_state, | 2843 | dc_plane_state->clip_rect = scaling_info.clip_rect; |
2818 | amdgpu_fb); | 2844 | dc_plane_state->scaling_quality = scaling_info.scaling_quality; |
2819 | 2845 | ||
2846 | ret = get_fb_info(amdgpu_fb, &tiling_flags); | ||
2820 | if (ret) | 2847 | if (ret) |
2821 | return ret; | 2848 | return ret; |
2822 | 2849 | ||
2823 | ret = fill_plane_color_attributes(plane_state, dc_plane_state, | 2850 | ret = fill_dc_plane_info_and_addr(adev, plane_state, tiling_flags, |
2824 | &dc_plane_state->color_space); | 2851 | &plane_info, |
2852 | &dc_plane_state->address); | ||
2825 | if (ret) | 2853 | if (ret) |
2826 | return ret; | 2854 | return ret; |
2827 | 2855 | ||
2856 | dc_plane_state->format = plane_info.format; | ||
2857 | dc_plane_state->color_space = plane_info.color_space; | ||
2858 | dc_plane_state->format = plane_info.format; | ||
2859 | dc_plane_state->plane_size = plane_info.plane_size; | ||
2860 | dc_plane_state->rotation = plane_info.rotation; | ||
2861 | dc_plane_state->horizontal_mirror = plane_info.horizontal_mirror; | ||
2862 | dc_plane_state->stereo_format = plane_info.stereo_format; | ||
2863 | dc_plane_state->tiling_info = plane_info.tiling_info; | ||
2864 | dc_plane_state->visible = plane_info.visible; | ||
2865 | dc_plane_state->per_pixel_alpha = plane_info.per_pixel_alpha; | ||
2866 | dc_plane_state->global_alpha = plane_info.global_alpha; | ||
2867 | dc_plane_state->global_alpha_value = plane_info.global_alpha_value; | ||
2868 | dc_plane_state->dcc = plane_info.dcc; | ||
2869 | |||
2828 | /* | 2870 | /* |
2829 | * Always set input transfer function, since plane state is refreshed | 2871 | * Always set input transfer function, since plane state is refreshed |
2830 | * every time. | 2872 | * every time. |
@@ -2835,11 +2877,6 @@ static int fill_plane_attributes(struct amdgpu_device *adev, | |||
2835 | dc_plane_state->in_transfer_func = NULL; | 2877 | dc_plane_state->in_transfer_func = NULL; |
2836 | } | 2878 | } |
2837 | 2879 | ||
2838 | fill_blending_from_plane_state(plane_state, dc_plane_state, | ||
2839 | &dc_plane_state->per_pixel_alpha, | ||
2840 | &dc_plane_state->global_alpha, | ||
2841 | &dc_plane_state->global_alpha_value); | ||
2842 | |||
2843 | return ret; | 2880 | return ret; |
2844 | } | 2881 | } |
2845 | 2882 | ||
@@ -3825,6 +3862,38 @@ static void dm_crtc_helper_disable(struct drm_crtc *crtc) | |||
3825 | { | 3862 | { |
3826 | } | 3863 | } |
3827 | 3864 | ||
3865 | static bool does_crtc_have_active_plane(struct drm_crtc_state *new_crtc_state) | ||
3866 | { | ||
3867 | struct drm_atomic_state *state = new_crtc_state->state; | ||
3868 | struct drm_plane *plane; | ||
3869 | int num_active = 0; | ||
3870 | |||
3871 | drm_for_each_plane_mask(plane, state->dev, new_crtc_state->plane_mask) { | ||
3872 | struct drm_plane_state *new_plane_state; | ||
3873 | |||
3874 | /* Cursor planes are "fake". */ | ||
3875 | if (plane->type == DRM_PLANE_TYPE_CURSOR) | ||
3876 | continue; | ||
3877 | |||
3878 | new_plane_state = drm_atomic_get_new_plane_state(state, plane); | ||
3879 | |||
3880 | if (!new_plane_state) { | ||
3881 | /* | ||
3882 | * The plane is enable on the CRTC and hasn't changed | ||
3883 | * state. This means that it previously passed | ||
3884 | * validation and is therefore enabled. | ||
3885 | */ | ||
3886 | num_active += 1; | ||
3887 | continue; | ||
3888 | } | ||
3889 | |||
3890 | /* We need a framebuffer to be considered enabled. */ | ||
3891 | num_active += (new_plane_state->fb != NULL); | ||
3892 | } | ||
3893 | |||
3894 | return num_active > 0; | ||
3895 | } | ||
3896 | |||
3828 | static int dm_crtc_helper_atomic_check(struct drm_crtc *crtc, | 3897 | static int dm_crtc_helper_atomic_check(struct drm_crtc *crtc, |
3829 | struct drm_crtc_state *state) | 3898 | struct drm_crtc_state *state) |
3830 | { | 3899 | { |
@@ -3843,6 +3912,11 @@ static int dm_crtc_helper_atomic_check(struct drm_crtc *crtc, | |||
3843 | if (!dm_crtc_state->stream) | 3912 | if (!dm_crtc_state->stream) |
3844 | return 0; | 3913 | return 0; |
3845 | 3914 | ||
3915 | /* We want at least one hardware plane enabled to use the stream. */ | ||
3916 | if (state->enable && state->active && | ||
3917 | !does_crtc_have_active_plane(state)) | ||
3918 | return -EINVAL; | ||
3919 | |||
3846 | if (dc_validate_stream(dc, dm_crtc_state->stream) == DC_OK) | 3920 | if (dc_validate_stream(dc, dm_crtc_state->stream) == DC_OK) |
3847 | return 0; | 3921 | return 0; |
3848 | 3922 | ||
@@ -3994,9 +4068,11 @@ static int dm_plane_helper_prepare_fb(struct drm_plane *plane, | |||
3994 | dm_plane_state_old->dc_state != dm_plane_state_new->dc_state) { | 4068 | dm_plane_state_old->dc_state != dm_plane_state_new->dc_state) { |
3995 | struct dc_plane_state *plane_state = dm_plane_state_new->dc_state; | 4069 | struct dc_plane_state *plane_state = dm_plane_state_new->dc_state; |
3996 | 4070 | ||
3997 | fill_plane_tiling_attributes( | 4071 | fill_plane_buffer_attributes( |
3998 | adev, afb, plane_state, &plane_state->tiling_info, | 4072 | adev, afb, plane_state->format, plane_state->rotation, |
3999 | &plane_state->dcc, &plane_state->address, tiling_flags); | 4073 | tiling_flags, &plane_state->tiling_info, |
4074 | &plane_state->plane_size, &plane_state->dcc, | ||
4075 | &plane_state->address); | ||
4000 | } | 4076 | } |
4001 | 4077 | ||
4002 | return 0; | 4078 | return 0; |
@@ -4028,13 +4104,18 @@ static int dm_plane_atomic_check(struct drm_plane *plane, | |||
4028 | { | 4104 | { |
4029 | struct amdgpu_device *adev = plane->dev->dev_private; | 4105 | struct amdgpu_device *adev = plane->dev->dev_private; |
4030 | struct dc *dc = adev->dm.dc; | 4106 | struct dc *dc = adev->dm.dc; |
4031 | struct dm_plane_state *dm_plane_state = to_dm_plane_state(state); | 4107 | struct dm_plane_state *dm_plane_state; |
4108 | struct dc_scaling_info scaling_info; | ||
4109 | int ret; | ||
4110 | |||
4111 | dm_plane_state = to_dm_plane_state(state); | ||
4032 | 4112 | ||
4033 | if (!dm_plane_state->dc_state) | 4113 | if (!dm_plane_state->dc_state) |
4034 | return 0; | 4114 | return 0; |
4035 | 4115 | ||
4036 | if (!fill_rects_from_plane_state(state, dm_plane_state->dc_state)) | 4116 | ret = fill_dc_scaling_info(state, &scaling_info); |
4037 | return -EINVAL; | 4117 | if (ret) |
4118 | return ret; | ||
4038 | 4119 | ||
4039 | if (dc_validate_plane(dc, dm_plane_state->dc_state) == DC_OK) | 4120 | if (dc_validate_plane(dc, dm_plane_state->dc_state) == DC_OK) |
4040 | return 0; | 4121 | return 0; |
@@ -4121,46 +4202,71 @@ static const u32 cursor_formats[] = { | |||
4121 | DRM_FORMAT_ARGB8888 | 4202 | DRM_FORMAT_ARGB8888 |
4122 | }; | 4203 | }; |
4123 | 4204 | ||
4124 | static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm, | 4205 | static int get_plane_formats(const struct drm_plane *plane, |
4125 | struct drm_plane *plane, | 4206 | const struct dc_plane_cap *plane_cap, |
4126 | unsigned long possible_crtcs, | 4207 | uint32_t *formats, int max_formats) |
4127 | const struct dc_plane_cap *plane_cap) | ||
4128 | { | 4208 | { |
4129 | int res = -EPERM; | 4209 | int i, num_formats = 0; |
4210 | |||
4211 | /* | ||
4212 | * TODO: Query support for each group of formats directly from | ||
4213 | * DC plane caps. This will require adding more formats to the | ||
4214 | * caps list. | ||
4215 | */ | ||
4130 | 4216 | ||
4131 | switch (plane->type) { | 4217 | switch (plane->type) { |
4132 | case DRM_PLANE_TYPE_PRIMARY: | 4218 | case DRM_PLANE_TYPE_PRIMARY: |
4133 | res = drm_universal_plane_init( | 4219 | for (i = 0; i < ARRAY_SIZE(rgb_formats); ++i) { |
4134 | dm->adev->ddev, | 4220 | if (num_formats >= max_formats) |
4135 | plane, | 4221 | break; |
4136 | possible_crtcs, | 4222 | |
4137 | &dm_plane_funcs, | 4223 | formats[num_formats++] = rgb_formats[i]; |
4138 | rgb_formats, | 4224 | } |
4139 | ARRAY_SIZE(rgb_formats), | 4225 | |
4140 | NULL, plane->type, NULL); | 4226 | if (plane_cap && plane_cap->pixel_format_support.nv12) |
4227 | formats[num_formats++] = DRM_FORMAT_NV12; | ||
4141 | break; | 4228 | break; |
4229 | |||
4142 | case DRM_PLANE_TYPE_OVERLAY: | 4230 | case DRM_PLANE_TYPE_OVERLAY: |
4143 | res = drm_universal_plane_init( | 4231 | for (i = 0; i < ARRAY_SIZE(overlay_formats); ++i) { |
4144 | dm->adev->ddev, | 4232 | if (num_formats >= max_formats) |
4145 | plane, | 4233 | break; |
4146 | possible_crtcs, | 4234 | |
4147 | &dm_plane_funcs, | 4235 | formats[num_formats++] = overlay_formats[i]; |
4148 | overlay_formats, | 4236 | } |
4149 | ARRAY_SIZE(overlay_formats), | ||
4150 | NULL, plane->type, NULL); | ||
4151 | break; | 4237 | break; |
4238 | |||
4152 | case DRM_PLANE_TYPE_CURSOR: | 4239 | case DRM_PLANE_TYPE_CURSOR: |
4153 | res = drm_universal_plane_init( | 4240 | for (i = 0; i < ARRAY_SIZE(cursor_formats); ++i) { |
4154 | dm->adev->ddev, | 4241 | if (num_formats >= max_formats) |
4155 | plane, | 4242 | break; |
4156 | possible_crtcs, | 4243 | |
4157 | &dm_plane_funcs, | 4244 | formats[num_formats++] = cursor_formats[i]; |
4158 | cursor_formats, | 4245 | } |
4159 | ARRAY_SIZE(cursor_formats), | ||
4160 | NULL, plane->type, NULL); | ||
4161 | break; | 4246 | break; |
4162 | } | 4247 | } |
4163 | 4248 | ||
4249 | return num_formats; | ||
4250 | } | ||
4251 | |||
4252 | static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm, | ||
4253 | struct drm_plane *plane, | ||
4254 | unsigned long possible_crtcs, | ||
4255 | const struct dc_plane_cap *plane_cap) | ||
4256 | { | ||
4257 | uint32_t formats[32]; | ||
4258 | int num_formats; | ||
4259 | int res = -EPERM; | ||
4260 | |||
4261 | num_formats = get_plane_formats(plane, plane_cap, formats, | ||
4262 | ARRAY_SIZE(formats)); | ||
4263 | |||
4264 | res = drm_universal_plane_init(dm->adev->ddev, plane, possible_crtcs, | ||
4265 | &dm_plane_funcs, formats, num_formats, | ||
4266 | NULL, plane->type, NULL); | ||
4267 | if (res) | ||
4268 | return res; | ||
4269 | |||
4164 | if (plane->type == DRM_PLANE_TYPE_OVERLAY && | 4270 | if (plane->type == DRM_PLANE_TYPE_OVERLAY && |
4165 | plane_cap && plane_cap->per_pixel_alpha) { | 4271 | plane_cap && plane_cap->per_pixel_alpha) { |
4166 | unsigned int blend_caps = BIT(DRM_MODE_BLEND_PIXEL_NONE) | | 4272 | unsigned int blend_caps = BIT(DRM_MODE_BLEND_PIXEL_NONE) | |
@@ -4170,14 +4276,25 @@ static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm, | |||
4170 | drm_plane_create_blend_mode_property(plane, blend_caps); | 4276 | drm_plane_create_blend_mode_property(plane, blend_caps); |
4171 | } | 4277 | } |
4172 | 4278 | ||
4279 | if (plane->type == DRM_PLANE_TYPE_PRIMARY && | ||
4280 | plane_cap && plane_cap->pixel_format_support.nv12) { | ||
4281 | /* This only affects YUV formats. */ | ||
4282 | drm_plane_create_color_properties( | ||
4283 | plane, | ||
4284 | BIT(DRM_COLOR_YCBCR_BT601) | | ||
4285 | BIT(DRM_COLOR_YCBCR_BT709), | ||
4286 | BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) | | ||
4287 | BIT(DRM_COLOR_YCBCR_FULL_RANGE), | ||
4288 | DRM_COLOR_YCBCR_BT709, DRM_COLOR_YCBCR_LIMITED_RANGE); | ||
4289 | } | ||
4290 | |||
4173 | drm_plane_helper_add(plane, &dm_plane_helper_funcs); | 4291 | drm_plane_helper_add(plane, &dm_plane_helper_funcs); |
4174 | 4292 | ||
4175 | /* Create (reset) the plane state */ | 4293 | /* Create (reset) the plane state */ |
4176 | if (plane->funcs->reset) | 4294 | if (plane->funcs->reset) |
4177 | plane->funcs->reset(plane); | 4295 | plane->funcs->reset(plane); |
4178 | 4296 | ||
4179 | 4297 | return 0; | |
4180 | return res; | ||
4181 | } | 4298 | } |
4182 | 4299 | ||
4183 | static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm, | 4300 | static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm, |
@@ -4769,9 +4886,13 @@ static int get_cursor_position(struct drm_plane *plane, struct drm_crtc *crtc, | |||
4769 | 4886 | ||
4770 | x = plane->state->crtc_x; | 4887 | x = plane->state->crtc_x; |
4771 | y = plane->state->crtc_y; | 4888 | y = plane->state->crtc_y; |
4772 | /* avivo cursor are offset into the total surface */ | 4889 | |
4773 | x += crtc->primary->state->src_x >> 16; | 4890 | if (crtc->primary->state) { |
4774 | y += crtc->primary->state->src_y >> 16; | 4891 | /* avivo cursor are offset into the total surface */ |
4892 | x += crtc->primary->state->src_x >> 16; | ||
4893 | y += crtc->primary->state->src_y >> 16; | ||
4894 | } | ||
4895 | |||
4775 | if (x < 0) { | 4896 | if (x < 0) { |
4776 | xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1); | 4897 | xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1); |
4777 | x = 0; | 4898 | x = 0; |
@@ -5046,7 +5167,6 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state, | |||
5046 | struct drm_crtc *crtc = new_plane_state->crtc; | 5167 | struct drm_crtc *crtc = new_plane_state->crtc; |
5047 | struct drm_crtc_state *new_crtc_state; | 5168 | struct drm_crtc_state *new_crtc_state; |
5048 | struct drm_framebuffer *fb = new_plane_state->fb; | 5169 | struct drm_framebuffer *fb = new_plane_state->fb; |
5049 | struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(fb); | ||
5050 | bool plane_needs_flip; | 5170 | bool plane_needs_flip; |
5051 | struct dc_plane_state *dc_plane; | 5171 | struct dc_plane_state *dc_plane; |
5052 | struct dm_plane_state *dm_new_plane_state = to_dm_plane_state(new_plane_state); | 5172 | struct dm_plane_state *dm_new_plane_state = to_dm_plane_state(new_plane_state); |
@@ -5070,29 +5190,11 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state, | |||
5070 | bundle->surface_updates[planes_count].in_transfer_func = dc_plane->in_transfer_func; | 5190 | bundle->surface_updates[planes_count].in_transfer_func = dc_plane->in_transfer_func; |
5071 | } | 5191 | } |
5072 | 5192 | ||
5193 | fill_dc_scaling_info(new_plane_state, | ||
5194 | &bundle->scaling_infos[planes_count]); | ||
5073 | 5195 | ||
5074 | bundle->scaling_infos[planes_count].scaling_quality = dc_plane->scaling_quality; | 5196 | bundle->surface_updates[planes_count].scaling_info = |
5075 | bundle->scaling_infos[planes_count].src_rect = dc_plane->src_rect; | 5197 | &bundle->scaling_infos[planes_count]; |
5076 | bundle->scaling_infos[planes_count].dst_rect = dc_plane->dst_rect; | ||
5077 | bundle->scaling_infos[planes_count].clip_rect = dc_plane->clip_rect; | ||
5078 | bundle->surface_updates[planes_count].scaling_info = &bundle->scaling_infos[planes_count]; | ||
5079 | |||
5080 | fill_plane_color_attributes( | ||
5081 | new_plane_state, dc_plane, | ||
5082 | &bundle->plane_infos[planes_count].color_space); | ||
5083 | |||
5084 | bundle->plane_infos[planes_count].format = dc_plane->format; | ||
5085 | bundle->plane_infos[planes_count].plane_size = dc_plane->plane_size; | ||
5086 | bundle->plane_infos[planes_count].rotation = dc_plane->rotation; | ||
5087 | bundle->plane_infos[planes_count].horizontal_mirror = dc_plane->horizontal_mirror; | ||
5088 | bundle->plane_infos[planes_count].stereo_format = dc_plane->stereo_format; | ||
5089 | bundle->plane_infos[planes_count].tiling_info = dc_plane->tiling_info; | ||
5090 | bundle->plane_infos[planes_count].visible = dc_plane->visible; | ||
5091 | bundle->plane_infos[planes_count].global_alpha = dc_plane->global_alpha; | ||
5092 | bundle->plane_infos[planes_count].global_alpha_value = dc_plane->global_alpha_value; | ||
5093 | bundle->plane_infos[planes_count].per_pixel_alpha = dc_plane->per_pixel_alpha; | ||
5094 | bundle->plane_infos[planes_count].dcc = dc_plane->dcc; | ||
5095 | bundle->surface_updates[planes_count].plane_info = &bundle->plane_infos[planes_count]; | ||
5096 | 5198 | ||
5097 | plane_needs_flip = old_plane_state->fb && new_plane_state->fb; | 5199 | plane_needs_flip = old_plane_state->fb && new_plane_state->fb; |
5098 | 5200 | ||
@@ -5124,11 +5226,13 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state, | |||
5124 | 5226 | ||
5125 | amdgpu_bo_unreserve(abo); | 5227 | amdgpu_bo_unreserve(abo); |
5126 | 5228 | ||
5127 | fill_plane_tiling_attributes(dm->adev, afb, dc_plane, | 5229 | fill_dc_plane_info_and_addr( |
5128 | &bundle->plane_infos[planes_count].tiling_info, | 5230 | dm->adev, new_plane_state, tiling_flags, |
5129 | &bundle->plane_infos[planes_count].dcc, | 5231 | &bundle->plane_infos[planes_count], |
5130 | &bundle->flip_addrs[planes_count].address, | 5232 | &bundle->flip_addrs[planes_count].address); |
5131 | tiling_flags); | 5233 | |
5234 | bundle->surface_updates[planes_count].plane_info = | ||
5235 | &bundle->plane_infos[planes_count]; | ||
5132 | 5236 | ||
5133 | bundle->flip_addrs[planes_count].flip_immediate = | 5237 | bundle->flip_addrs[planes_count].flip_immediate = |
5134 | (crtc->state->pageflip_flags & DRM_MODE_PAGE_FLIP_ASYNC) != 0; | 5238 | (crtc->state->pageflip_flags & DRM_MODE_PAGE_FLIP_ASYNC) != 0; |
@@ -5812,21 +5916,12 @@ static int dm_update_crtc_state(struct amdgpu_display_manager *dm, | |||
5812 | struct amdgpu_dm_connector *aconnector = NULL; | 5916 | struct amdgpu_dm_connector *aconnector = NULL; |
5813 | struct drm_connector_state *drm_new_conn_state = NULL, *drm_old_conn_state = NULL; | 5917 | struct drm_connector_state *drm_new_conn_state = NULL, *drm_old_conn_state = NULL; |
5814 | struct dm_connector_state *dm_new_conn_state = NULL, *dm_old_conn_state = NULL; | 5918 | struct dm_connector_state *dm_new_conn_state = NULL, *dm_old_conn_state = NULL; |
5815 | struct drm_plane_state *new_plane_state = NULL; | ||
5816 | 5919 | ||
5817 | new_stream = NULL; | 5920 | new_stream = NULL; |
5818 | 5921 | ||
5819 | dm_old_crtc_state = to_dm_crtc_state(old_crtc_state); | 5922 | dm_old_crtc_state = to_dm_crtc_state(old_crtc_state); |
5820 | dm_new_crtc_state = to_dm_crtc_state(new_crtc_state); | 5923 | dm_new_crtc_state = to_dm_crtc_state(new_crtc_state); |
5821 | acrtc = to_amdgpu_crtc(crtc); | 5924 | acrtc = to_amdgpu_crtc(crtc); |
5822 | |||
5823 | new_plane_state = drm_atomic_get_new_plane_state(state, new_crtc_state->crtc->primary); | ||
5824 | |||
5825 | if (new_crtc_state->enable && new_plane_state && !new_plane_state->fb) { | ||
5826 | ret = -EINVAL; | ||
5827 | goto fail; | ||
5828 | } | ||
5829 | |||
5830 | aconnector = amdgpu_dm_find_first_crtc_matching_connector(state, crtc); | 5925 | aconnector = amdgpu_dm_find_first_crtc_matching_connector(state, crtc); |
5831 | 5926 | ||
5832 | /* TODO This hack should go away */ | 5927 | /* TODO This hack should go away */ |
@@ -6016,6 +6111,69 @@ fail: | |||
6016 | return ret; | 6111 | return ret; |
6017 | } | 6112 | } |
6018 | 6113 | ||
6114 | static bool should_reset_plane(struct drm_atomic_state *state, | ||
6115 | struct drm_plane *plane, | ||
6116 | struct drm_plane_state *old_plane_state, | ||
6117 | struct drm_plane_state *new_plane_state) | ||
6118 | { | ||
6119 | struct drm_plane *other; | ||
6120 | struct drm_plane_state *old_other_state, *new_other_state; | ||
6121 | struct drm_crtc_state *new_crtc_state; | ||
6122 | int i; | ||
6123 | |||
6124 | /* | ||
6125 | * TODO: Remove this hack once the checks below are sufficient | ||
6126 | * enough to determine when we need to reset all the planes on | ||
6127 | * the stream. | ||
6128 | */ | ||
6129 | if (state->allow_modeset) | ||
6130 | return true; | ||
6131 | |||
6132 | /* Exit early if we know that we're adding or removing the plane. */ | ||
6133 | if (old_plane_state->crtc != new_plane_state->crtc) | ||
6134 | return true; | ||
6135 | |||
6136 | /* old crtc == new_crtc == NULL, plane not in context. */ | ||
6137 | if (!new_plane_state->crtc) | ||
6138 | return false; | ||
6139 | |||
6140 | new_crtc_state = | ||
6141 | drm_atomic_get_new_crtc_state(state, new_plane_state->crtc); | ||
6142 | |||
6143 | if (!new_crtc_state) | ||
6144 | return true; | ||
6145 | |||
6146 | if (drm_atomic_crtc_needs_modeset(new_crtc_state)) | ||
6147 | return true; | ||
6148 | |||
6149 | /* | ||
6150 | * If there are any new primary or overlay planes being added or | ||
6151 | * removed then the z-order can potentially change. To ensure | ||
6152 | * correct z-order and pipe acquisition the current DC architecture | ||
6153 | * requires us to remove and recreate all existing planes. | ||
6154 | * | ||
6155 | * TODO: Come up with a more elegant solution for this. | ||
6156 | */ | ||
6157 | for_each_oldnew_plane_in_state(state, other, old_other_state, new_other_state, i) { | ||
6158 | if (other->type == DRM_PLANE_TYPE_CURSOR) | ||
6159 | continue; | ||
6160 | |||
6161 | if (old_other_state->crtc != new_plane_state->crtc && | ||
6162 | new_other_state->crtc != new_plane_state->crtc) | ||
6163 | continue; | ||
6164 | |||
6165 | if (old_other_state->crtc != new_other_state->crtc) | ||
6166 | return true; | ||
6167 | |||
6168 | /* TODO: Remove this once we can handle fast format changes. */ | ||
6169 | if (old_other_state->fb && new_other_state->fb && | ||
6170 | old_other_state->fb->format != new_other_state->fb->format) | ||
6171 | return true; | ||
6172 | } | ||
6173 | |||
6174 | return false; | ||
6175 | } | ||
6176 | |||
6019 | static int dm_update_plane_state(struct dc *dc, | 6177 | static int dm_update_plane_state(struct dc *dc, |
6020 | struct drm_atomic_state *state, | 6178 | struct drm_atomic_state *state, |
6021 | struct drm_plane *plane, | 6179 | struct drm_plane *plane, |
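should_reset_plane() replaces the old pflip_needed heuristic: DC plane state is removed and re-added only when a plane changes CRTC, the CRTC needs a modeset, or another primary/overlay plane on the same CRTC is added, removed, or changes format. A hedged sketch of how a caller interprets the result, mirroring dm_update_plane_state() in the hunks that follow:

/* Illustration only -- simplified from dm_update_plane_state() below. */
bool needs_reset = should_reset_plane(state, plane,
                                      old_plane_state, new_plane_state);

if (!needs_reset)
        return 0;       /* fast path: the existing dc_plane_state is
                         * updated in place at commit time             */

/* slow path: because of DC's z-order and pipe-acquisition rules, every
 * non-cursor plane on the stream is torn down and recreated with a
 * fresh dc_plane_state
 */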
@@ -6030,8 +6188,7 @@ static int dm_update_plane_state(struct dc *dc, | |||
6030 | struct drm_crtc_state *old_crtc_state, *new_crtc_state; | 6188 | struct drm_crtc_state *old_crtc_state, *new_crtc_state; |
6031 | struct dm_crtc_state *dm_new_crtc_state, *dm_old_crtc_state; | 6189 | struct dm_crtc_state *dm_new_crtc_state, *dm_old_crtc_state; |
6032 | struct dm_plane_state *dm_new_plane_state, *dm_old_plane_state; | 6190 | struct dm_plane_state *dm_new_plane_state, *dm_old_plane_state; |
6033 | /* TODO return page_flip_needed() function */ | 6191 | bool needs_reset; |
6034 | bool pflip_needed = !state->allow_modeset; | ||
6035 | int ret = 0; | 6192 | int ret = 0; |
6036 | 6193 | ||
6037 | 6194 | ||
@@ -6044,10 +6201,12 @@ static int dm_update_plane_state(struct dc *dc, | |||
6044 | if (plane->type == DRM_PLANE_TYPE_CURSOR) | 6201 | if (plane->type == DRM_PLANE_TYPE_CURSOR) |
6045 | return 0; | 6202 | return 0; |
6046 | 6203 | ||
6204 | needs_reset = should_reset_plane(state, plane, old_plane_state, | ||
6205 | new_plane_state); | ||
6206 | |||
6047 | /* Remove any changed/removed planes */ | 6207 | /* Remove any changed/removed planes */ |
6048 | if (!enable) { | 6208 | if (!enable) { |
6049 | if (pflip_needed && | 6209 | if (!needs_reset) |
6050 | plane->type != DRM_PLANE_TYPE_OVERLAY) | ||
6051 | return 0; | 6210 | return 0; |
6052 | 6211 | ||
6053 | if (!old_plane_crtc) | 6212 | if (!old_plane_crtc) |
@@ -6098,7 +6257,7 @@ static int dm_update_plane_state(struct dc *dc, | |||
6098 | if (!dm_new_crtc_state->stream) | 6257 | if (!dm_new_crtc_state->stream) |
6099 | return 0; | 6258 | return 0; |
6100 | 6259 | ||
6101 | if (pflip_needed && plane->type != DRM_PLANE_TYPE_OVERLAY) | 6260 | if (!needs_reset) |
6102 | return 0; | 6261 | return 0; |
6103 | 6262 | ||
6104 | WARN_ON(dm_new_plane_state->dc_state); | 6263 | WARN_ON(dm_new_plane_state->dc_state); |
@@ -6110,7 +6269,7 @@ static int dm_update_plane_state(struct dc *dc, | |||
6110 | DRM_DEBUG_DRIVER("Enabling DRM plane: %d on DRM crtc %d\n", | 6269 | DRM_DEBUG_DRIVER("Enabling DRM plane: %d on DRM crtc %d\n", |
6111 | plane->base.id, new_plane_crtc->base.id); | 6270 | plane->base.id, new_plane_crtc->base.id); |
6112 | 6271 | ||
6113 | ret = fill_plane_attributes( | 6272 | ret = fill_dc_plane_attributes( |
6114 | new_plane_crtc->dev->dev_private, | 6273 | new_plane_crtc->dev->dev_private, |
6115 | dc_new_plane_state, | 6274 | dc_new_plane_state, |
6116 | new_plane_state, | 6275 | new_plane_state, |
@@ -6158,10 +6317,11 @@ static int dm_update_plane_state(struct dc *dc, | |||
6158 | } | 6317 | } |
6159 | 6318 | ||
6160 | static int | 6319 | static int |
6161 | dm_determine_update_type_for_commit(struct dc *dc, | 6320 | dm_determine_update_type_for_commit(struct amdgpu_display_manager *dm, |
6162 | struct drm_atomic_state *state, | 6321 | struct drm_atomic_state *state, |
6163 | enum surface_update_type *out_type) | 6322 | enum surface_update_type *out_type) |
6164 | { | 6323 | { |
6324 | struct dc *dc = dm->dc; | ||
6165 | struct dm_atomic_state *dm_state = NULL, *old_dm_state = NULL; | 6325 | struct dm_atomic_state *dm_state = NULL, *old_dm_state = NULL; |
6166 | int i, j, num_plane, ret = 0; | 6326 | int i, j, num_plane, ret = 0; |
6167 | struct drm_plane_state *old_plane_state, *new_plane_state; | 6327 | struct drm_plane_state *old_plane_state, *new_plane_state; |
@@ -6175,20 +6335,19 @@ dm_determine_update_type_for_commit(struct dc *dc, | |||
6175 | struct dc_stream_status *status = NULL; | 6335 | struct dc_stream_status *status = NULL; |
6176 | 6336 | ||
6177 | struct dc_surface_update *updates; | 6337 | struct dc_surface_update *updates; |
6178 | struct dc_plane_state *surface; | ||
6179 | enum surface_update_type update_type = UPDATE_TYPE_FAST; | 6338 | enum surface_update_type update_type = UPDATE_TYPE_FAST; |
6180 | 6339 | ||
6181 | updates = kcalloc(MAX_SURFACES, sizeof(*updates), GFP_KERNEL); | 6340 | updates = kcalloc(MAX_SURFACES, sizeof(*updates), GFP_KERNEL); |
6182 | surface = kcalloc(MAX_SURFACES, sizeof(*surface), GFP_KERNEL); | ||
6183 | 6341 | ||
6184 | if (!updates || !surface) { | 6342 | if (!updates) { |
6185 | DRM_ERROR("Plane or surface update failed to allocate"); | 6343 | DRM_ERROR("Failed to allocate plane updates\n"); |
6186 | /* Set type to FULL to avoid crashing in DC*/ | 6344 | /* Set type to FULL to avoid crashing in DC*/ |
6187 | update_type = UPDATE_TYPE_FULL; | 6345 | update_type = UPDATE_TYPE_FULL; |
6188 | goto cleanup; | 6346 | goto cleanup; |
6189 | } | 6347 | } |
6190 | 6348 | ||
6191 | for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { | 6349 | for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { |
6350 | struct dc_scaling_info scaling_info; | ||
6192 | struct dc_stream_update stream_update; | 6351 | struct dc_stream_update stream_update; |
6193 | 6352 | ||
6194 | memset(&stream_update, 0, sizeof(stream_update)); | 6353 | memset(&stream_update, 0, sizeof(stream_update)); |
@@ -6219,23 +6378,12 @@ dm_determine_update_type_for_commit(struct dc *dc, | |||
6219 | goto cleanup; | 6378 | goto cleanup; |
6220 | } | 6379 | } |
6221 | 6380 | ||
6222 | if (!state->allow_modeset) | ||
6223 | continue; | ||
6224 | |||
6225 | if (crtc != new_plane_crtc) | 6381 | if (crtc != new_plane_crtc) |
6226 | continue; | 6382 | continue; |
6227 | 6383 | ||
6228 | updates[num_plane].surface = &surface[num_plane]; | 6384 | updates[num_plane].surface = new_dm_plane_state->dc_state; |
6229 | 6385 | ||
6230 | if (new_crtc_state->mode_changed) { | 6386 | if (new_crtc_state->mode_changed) { |
6231 | updates[num_plane].surface->src_rect = | ||
6232 | new_dm_plane_state->dc_state->src_rect; | ||
6233 | updates[num_plane].surface->dst_rect = | ||
6234 | new_dm_plane_state->dc_state->dst_rect; | ||
6235 | updates[num_plane].surface->rotation = | ||
6236 | new_dm_plane_state->dc_state->rotation; | ||
6237 | updates[num_plane].surface->in_transfer_func = | ||
6238 | new_dm_plane_state->dc_state->in_transfer_func; | ||
6239 | stream_update.dst = new_dm_crtc_state->stream->dst; | 6387 | stream_update.dst = new_dm_crtc_state->stream->dst; |
6240 | stream_update.src = new_dm_crtc_state->stream->src; | 6388 | stream_update.src = new_dm_crtc_state->stream->src; |
6241 | } | 6389 | } |
@@ -6251,6 +6399,13 @@ dm_determine_update_type_for_commit(struct dc *dc, | |||
6251 | new_dm_crtc_state->stream->out_transfer_func; | 6399 | new_dm_crtc_state->stream->out_transfer_func; |
6252 | } | 6400 | } |
6253 | 6401 | ||
6402 | ret = fill_dc_scaling_info(new_plane_state, | ||
6403 | &scaling_info); | ||
6404 | if (ret) | ||
6405 | goto cleanup; | ||
6406 | |||
6407 | updates[num_plane].scaling_info = &scaling_info; | ||
6408 | |||
6254 | num_plane++; | 6409 | num_plane++; |
6255 | } | 6410 | } |
6256 | 6411 | ||
@@ -6270,8 +6425,14 @@ dm_determine_update_type_for_commit(struct dc *dc, | |||
6270 | status = dc_stream_get_status_from_state(old_dm_state->context, | 6425 | status = dc_stream_get_status_from_state(old_dm_state->context, |
6271 | new_dm_crtc_state->stream); | 6426 | new_dm_crtc_state->stream); |
6272 | 6427 | ||
6428 | /* | ||
6429 | * TODO: DC modifies the surface during this call so we need | ||
6430 | * to lock here - find a way to do this without locking. | ||
6431 | */ | ||
6432 | mutex_lock(&dm->dc_lock); | ||
6273 | update_type = dc_check_update_surfaces_for_stream(dc, updates, num_plane, | 6433 | update_type = dc_check_update_surfaces_for_stream(dc, updates, num_plane, |
6274 | &stream_update, status); | 6434 | &stream_update, status); |
6435 | mutex_unlock(&dm->dc_lock); | ||
6275 | 6436 | ||
6276 | if (update_type > UPDATE_TYPE_MED) { | 6437 | if (update_type > UPDATE_TYPE_MED) { |
6277 | update_type = UPDATE_TYPE_FULL; | 6438 | update_type = UPDATE_TYPE_FULL; |
@@ -6281,7 +6442,6 @@ dm_determine_update_type_for_commit(struct dc *dc, | |||
6281 | 6442 | ||
6282 | cleanup: | 6443 | cleanup: |
6283 | kfree(updates); | 6444 | kfree(updates); |
6284 | kfree(surface); | ||
6285 | 6445 | ||
6286 | *out_type = update_type; | 6446 | *out_type = update_type; |
6287 | return ret; | 6447 | return ret; |
@@ -6465,7 +6625,7 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev, | |||
6465 | lock_and_validation_needed = true; | 6625 | lock_and_validation_needed = true; |
6466 | } | 6626 | } |
6467 | 6627 | ||
6468 | ret = dm_determine_update_type_for_commit(dc, state, &update_type); | 6628 | ret = dm_determine_update_type_for_commit(&adev->dm, state, &update_type); |
6469 | if (ret) | 6629 | if (ret) |
6470 | goto fail; | 6630 | goto fail; |
6471 | 6631 | ||
@@ -6480,9 +6640,6 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev, | |||
6480 | */ | 6640 | */ |
6481 | if (lock_and_validation_needed && overall_update_type <= UPDATE_TYPE_FAST) | 6641 | if (lock_and_validation_needed && overall_update_type <= UPDATE_TYPE_FAST) |
6482 | WARN(1, "Global lock should be Set, overall_update_type should be UPDATE_TYPE_MED or UPDATE_TYPE_FULL"); | 6642 | WARN(1, "Global lock should be Set, overall_update_type should be UPDATE_TYPE_MED or UPDATE_TYPE_FULL"); |
6483 | else if (!lock_and_validation_needed && overall_update_type > UPDATE_TYPE_FAST) | ||
6484 | WARN(1, "Global lock should NOT be set, overall_update_type should be UPDATE_TYPE_FAST"); | ||
6485 | |||
6486 | 6643 | ||
6487 | if (overall_update_type > UPDATE_TYPE_FAST) { | 6644 | if (overall_update_type > UPDATE_TYPE_FAST) { |
6488 | ret = dm_atomic_get_state(state, &dm_state); | 6645 | ret = dm_atomic_get_state(state, &dm_state); |
@@ -6493,7 +6650,7 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev, | |||
6493 | if (ret) | 6650 | if (ret) |
6494 | goto fail; | 6651 | goto fail; |
6495 | 6652 | ||
6496 | if (dc_validate_global_state(dc, dm_state->context) != DC_OK) { | 6653 | if (dc_validate_global_state(dc, dm_state->context, false) != DC_OK) { |
6497 | ret = -EINVAL; | 6654 | ret = -EINVAL; |
6498 | goto fail; | 6655 | goto fail; |
6499 | } | 6656 | } |
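Taken together, the amdgpu_dm changes make the atomic check build the surface updates from the real dc_plane_state (under dm->dc_lock, since DC may modify surfaces during the check) and reserve full global validation for non-fast updates. A hedged sketch of the resulting flow in amdgpu_dm_atomic_check(), simplified rather than verbatim:

/* Sketch of the atomic-check path after this patch. */
ret = dm_determine_update_type_for_commit(&adev->dm, state, &update_type);
if (ret)
        goto fail;

/* overall_update_type also folds in whether a modeset was requested */
if (overall_update_type > UPDATE_TYPE_FAST) {
        /* Full path: build the private DC state and validate it with
         * fast_validate = false, so DC computes watermarks and per-pipe
         * programming rather than just answering yes/no.
         */
        ret = dm_atomic_get_state(state, &dm_state);
        if (ret)
                goto fail;

        if (dc_validate_global_state(dc, dm_state->context, false) != DC_OK) {
                ret = -EINVAL;
                goto fail;
        }
}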
diff --git a/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c b/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c index 8843361e842d..1b4b51657f5e 100644 --- a/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c +++ b/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c | |||
@@ -701,8 +701,15 @@ static void hack_bounding_box(struct dcn_bw_internal_vars *v, | |||
701 | 701 | ||
702 | bool dcn_validate_bandwidth( | 702 | bool dcn_validate_bandwidth( |
703 | struct dc *dc, | 703 | struct dc *dc, |
704 | struct dc_state *context) | 704 | struct dc_state *context, |
705 | bool fast_validate) | ||
705 | { | 706 | { |
707 | /* | ||
708 | * we want a breakdown of the various stages of validation, which the | ||
709 | * perf_trace macro doesn't support | ||
710 | */ | ||
711 | BW_VAL_TRACE_SETUP(); | ||
712 | |||
706 | const struct resource_pool *pool = dc->res_pool; | 713 | const struct resource_pool *pool = dc->res_pool; |
707 | struct dcn_bw_internal_vars *v = &context->dcn_bw_vars; | 714 | struct dcn_bw_internal_vars *v = &context->dcn_bw_vars; |
708 | int i, input_idx; | 715 | int i, input_idx; |
@@ -711,6 +718,9 @@ bool dcn_validate_bandwidth( | |||
711 | float bw_limit; | 718 | float bw_limit; |
712 | 719 | ||
713 | PERFORMANCE_TRACE_START(); | 720 | PERFORMANCE_TRACE_START(); |
721 | |||
722 | BW_VAL_TRACE_COUNT(); | ||
723 | |||
714 | if (dcn_bw_apply_registry_override(dc)) | 724 | if (dcn_bw_apply_registry_override(dc)) |
715 | dcn_bw_sync_calcs_and_dml(dc); | 725 | dcn_bw_sync_calcs_and_dml(dc); |
716 | 726 | ||
@@ -1013,8 +1023,11 @@ bool dcn_validate_bandwidth( | |||
1013 | mode_support_and_system_configuration(v); | 1023 | mode_support_and_system_configuration(v); |
1014 | } | 1024 | } |
1015 | 1025 | ||
1016 | if (v->voltage_level != 5) { | 1026 | BW_VAL_TRACE_END_VOLTAGE_LEVEL(); |
1027 | |||
1028 | if (v->voltage_level != number_of_states_plus_one && !fast_validate) { | ||
1017 | float bw_consumed = v->total_bandwidth_consumed_gbyte_per_second; | 1029 | float bw_consumed = v->total_bandwidth_consumed_gbyte_per_second; |
1030 | |||
1018 | if (bw_consumed < v->fabric_and_dram_bandwidth_vmin0p65) | 1031 | if (bw_consumed < v->fabric_and_dram_bandwidth_vmin0p65) |
1019 | bw_consumed = v->fabric_and_dram_bandwidth_vmin0p65; | 1032 | bw_consumed = v->fabric_and_dram_bandwidth_vmin0p65; |
1020 | else if (bw_consumed < v->fabric_and_dram_bandwidth_vmid0p72) | 1033 | else if (bw_consumed < v->fabric_and_dram_bandwidth_vmid0p72) |
@@ -1087,6 +1100,8 @@ bool dcn_validate_bandwidth( | |||
1087 | break; | 1100 | break; |
1088 | } | 1101 | } |
1089 | 1102 | ||
1103 | BW_VAL_TRACE_END_WATERMARKS(); | ||
1104 | |||
1090 | for (i = 0, input_idx = 0; i < pool->pipe_count; i++) { | 1105 | for (i = 0, input_idx = 0; i < pool->pipe_count; i++) { |
1091 | struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; | 1106 | struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; |
1092 | 1107 | ||
@@ -1177,6 +1192,10 @@ bool dcn_validate_bandwidth( | |||
1177 | 1192 | ||
1178 | input_idx++; | 1193 | input_idx++; |
1179 | } | 1194 | } |
1195 | } else if (v->voltage_level == number_of_states_plus_one) { | ||
1196 | BW_VAL_TRACE_SKIP(fail); | ||
1197 | } else if (fast_validate) { | ||
1198 | BW_VAL_TRACE_SKIP(fast); | ||
1180 | } | 1199 | } |
1181 | 1200 | ||
1182 | if (v->voltage_level == 0) { | 1201 | if (v->voltage_level == 0) { |
@@ -1196,6 +1215,7 @@ bool dcn_validate_bandwidth( | |||
1196 | kernel_fpu_end(); | 1215 | kernel_fpu_end(); |
1197 | 1216 | ||
1198 | PERFORMANCE_TRACE_END(); | 1217 | PERFORMANCE_TRACE_END(); |
1218 | BW_VAL_TRACE_FINISH(); | ||
1199 | 1219 | ||
1200 | if (bw_limit_pass && v->voltage_level != 5) | 1220 | if (bw_limit_pass && v->voltage_level != 5) |
1201 | return true; | 1221 | return true; |
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c index 4887d0611001..dda10b1f8574 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc.c | |||
@@ -597,7 +597,7 @@ uint32_t dc_link_bandwidth_kbps( | |||
597 | 597 | ||
598 | } | 598 | } |
599 | 599 | ||
600 | const struct dc_link_settings *dc_link_get_verified_link_cap( | 600 | const struct dc_link_settings *dc_link_get_link_cap( |
601 | const struct dc_link *link) | 601 | const struct dc_link *link) |
602 | { | 602 | { |
603 | if (link->preferred_link_setting.lane_count != LANE_COUNT_UNKNOWN && | 603 | if (link->preferred_link_setting.lane_count != LANE_COUNT_UNKNOWN && |
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c index acb4f829e042..a6424c70f4c5 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c | |||
@@ -1548,8 +1548,7 @@ bool dp_validate_mode_timing( | |||
1548 | timing->v_addressable == (uint32_t) 480) | 1548 | timing->v_addressable == (uint32_t) 480) |
1549 | return true; | 1549 | return true; |
1550 | 1550 | ||
1551 | /* We always use verified link settings */ | 1551 | link_setting = dc_link_get_link_cap(link); |
1552 | link_setting = dc_link_get_verified_link_cap(link); | ||
1553 | 1552 | ||
1554 | /* TODO: DYNAMIC_VALIDATION needs to be implemented */ | 1553 | /* TODO: DYNAMIC_VALIDATION needs to be implemented */ |
1555 | /*if (flags.DYNAMIC_VALIDATION == 1 && | 1554 | /*if (flags.DYNAMIC_VALIDATION == 1 && |
@@ -2587,6 +2586,9 @@ void detect_edp_sink_caps(struct dc_link *link) | |||
2587 | uint32_t entry; | 2586 | uint32_t entry; |
2588 | uint32_t link_rate_in_khz; | 2587 | uint32_t link_rate_in_khz; |
2589 | enum dc_link_rate link_rate = LINK_RATE_UNKNOWN; | 2588 | enum dc_link_rate link_rate = LINK_RATE_UNKNOWN; |
2589 | union lane_count_set lane_count_set = { {0} }; | ||
2590 | uint8_t link_bw_set; | ||
2591 | uint8_t link_rate_set; | ||
2590 | 2592 | ||
2591 | retrieve_link_cap(link); | 2593 | retrieve_link_cap(link); |
2592 | link->dpcd_caps.edp_supported_link_rates_count = 0; | 2594 | link->dpcd_caps.edp_supported_link_rates_count = 0; |
@@ -2612,6 +2614,33 @@ void detect_edp_sink_caps(struct dc_link *link) | |||
2612 | } | 2614 | } |
2613 | } | 2615 | } |
2614 | link->verified_link_cap = link->reported_link_cap; | 2616 | link->verified_link_cap = link->reported_link_cap; |
2617 | |||
2618 | // Read DPCD 00101h to find out the number of lanes currently set | ||
2619 | core_link_read_dpcd(link, DP_LANE_COUNT_SET, | ||
2620 | &lane_count_set.raw, sizeof(lane_count_set)); | ||
2621 | link->cur_link_settings.lane_count = lane_count_set.bits.LANE_COUNT_SET; | ||
2622 | |||
2623 | // Read DPCD 00100h to find if standard link rates are set | ||
2624 | core_link_read_dpcd(link, DP_LINK_BW_SET, | ||
2625 | &link_bw_set, sizeof(link_bw_set)); | ||
2626 | |||
2627 | if (link_bw_set == 0) { | ||
2628 | /* If standard link rates are not being used, | ||
2629 | * Read DPCD 00115h to find the link rate set used | ||
2630 | */ | ||
2631 | core_link_read_dpcd(link, DP_LINK_RATE_SET, | ||
2632 | &link_rate_set, sizeof(link_rate_set)); | ||
2633 | |||
2634 | if (link_rate_set < link->dpcd_caps.edp_supported_link_rates_count) { | ||
2635 | link->cur_link_settings.link_rate = | ||
2636 | link->dpcd_caps.edp_supported_link_rates[link_rate_set]; | ||
2637 | link->cur_link_settings.link_rate_set = link_rate_set; | ||
2638 | link->cur_link_settings.use_link_rate_set = true; | ||
2639 | } | ||
2640 | } else { | ||
2641 | link->cur_link_settings.link_rate = link_bw_set; | ||
2642 | link->cur_link_settings.use_link_rate_set = false; | ||
2643 | } | ||
2615 | } | 2644 | } |
2616 | 2645 | ||
2617 | void dc_link_dp_enable_hpd(const struct dc_link *link) | 2646 | void dc_link_dp_enable_hpd(const struct dc_link *link) |
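detect_edp_sink_caps() now also records whatever link configuration firmware (for example the GOP driver) already programmed, so DC can keep an already-lit eDP panel running without retraining. The DPCD registers involved are the standard ones: LINK_BW_SET (0x100), LANE_COUNT_SET (0x101) and, for eDP 1.4 sinks that use the rate table instead of the standard rates, LINK_RATE_SET (0x115). A condensed sketch of the decision added above (not verbatim):

if (link_bw_set == 0) {
        /* eDP 1.4 "link rate set" method: DPCD 0x115 indexes the table
         * of supported rates read from the sink earlier in this function.
         */
        if (link_rate_set < link->dpcd_caps.edp_supported_link_rates_count) {
                link->cur_link_settings.link_rate =
                        link->dpcd_caps.edp_supported_link_rates[link_rate_set];
                link->cur_link_settings.use_link_rate_set = true;
        }
} else {
        /* Standard method: DPCD 0x100 already encodes the rate directly. */
        link->cur_link_settings.link_rate = link_bw_set;
        link->cur_link_settings.use_link_rate_set = false;
}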
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c index 3830e6ce1355..eac7186e4f08 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c | |||
@@ -2067,12 +2067,14 @@ void dc_resource_state_construct( | |||
2067 | * Checks HW resource availability and bandwidth requirement. | 2067 | * Checks HW resource availability and bandwidth requirement. |
2068 | * @dc: dc struct for this driver | 2068 | * @dc: dc struct for this driver |
2069 | * @new_ctx: state to be validated | 2069 | * @new_ctx: state to be validated |
2070 | * @fast_validate: set to true if only yes/no to support matters | ||
2070 | * | 2071 | * |
2071 | * Return: DC_OK if the result can be programmed. Otherwise, an error code. | 2072 | * Return: DC_OK if the result can be programmed. Otherwise, an error code. |
2072 | */ | 2073 | */ |
2073 | enum dc_status dc_validate_global_state( | 2074 | enum dc_status dc_validate_global_state( |
2074 | struct dc *dc, | 2075 | struct dc *dc, |
2075 | struct dc_state *new_ctx) | 2076 | struct dc_state *new_ctx, |
2077 | bool fast_validate) | ||
2076 | { | 2078 | { |
2077 | enum dc_status result = DC_ERROR_UNEXPECTED; | 2079 | enum dc_status result = DC_ERROR_UNEXPECTED; |
2078 | int i, j; | 2080 | int i, j; |
@@ -2127,7 +2129,7 @@ enum dc_status dc_validate_global_state( | |||
2127 | result = resource_build_scaling_params_for_context(dc, new_ctx); | 2129 | result = resource_build_scaling_params_for_context(dc, new_ctx); |
2128 | 2130 | ||
2129 | if (result == DC_OK) | 2131 | if (result == DC_OK) |
2130 | if (!dc->res_pool->funcs->validate_bandwidth(dc, new_ctx)) | 2132 | if (!dc->res_pool->funcs->validate_bandwidth(dc, new_ctx, fast_validate)) |
2131 | result = DC_FAIL_BANDWIDTH_VALIDATE; | 2133 | result = DC_FAIL_BANDWIDTH_VALIDATE; |
2132 | 2134 | ||
2133 | return result; | 2135 | return result; |
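With the new parameter, callers choose between a cheap feasibility check and the full programming-info build. A hedged sketch of the two kinds of call sites one would expect; the commit path in amdgpu_dm above passes false, while the context names and reject_configuration() below are illustrative only:

/* Quick yes/no check, e.g. while probing whether a configuration can work.
 * Skips the watermark/rq/dlg derivation inside dcn_validate_bandwidth().
 */
if (dc_validate_global_state(dc, candidate_ctx, true) != DC_OK)
        reject_configuration();

/* Before actually committing: full validation so the context carries the
 * complete programming information.
 */
if (dc_validate_global_state(dc, commit_ctx, false) != DC_OK)
        return -EINVAL;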
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c index f7a293902234..e10479d58c11 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c | |||
@@ -211,7 +211,8 @@ static void delay_cursor_until_vupdate(struct pipe_ctx *pipe_ctx, struct dc *dc) | |||
211 | ASIC_REV_IS_RAVEN(stream->ctx->asic_id.hw_internal_rev)) { | 211 | ASIC_REV_IS_RAVEN(stream->ctx->asic_id.hw_internal_rev)) { |
212 | 212 | ||
213 | vupdate_line = get_vupdate_offset_from_vsync(pipe_ctx); | 213 | vupdate_line = get_vupdate_offset_from_vsync(pipe_ctx); |
214 | dc_stream_get_crtc_position(dc, &stream, 1, &vpos, &nvpos); | 214 | if (!dc_stream_get_crtc_position(dc, &stream, 1, &vpos, &nvpos)) |
215 | return; | ||
215 | 216 | ||
216 | if (vpos >= vupdate_line) | 217 | if (vpos >= vupdate_line) |
217 | return; | 218 | return; |
diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h index dc6a2c236ba7..3459e39714bc 100644 --- a/drivers/gpu/drm/amd/display/dc/dc.h +++ b/drivers/gpu/drm/amd/display/dc/dc.h | |||
@@ -39,7 +39,7 @@ | |||
39 | #include "inc/hw/dmcu.h" | 39 | #include "inc/hw/dmcu.h" |
40 | #include "dml/display_mode_lib.h" | 40 | #include "dml/display_mode_lib.h" |
41 | 41 | ||
42 | #define DC_VER "3.2.25" | 42 | #define DC_VER "3.2.26" |
43 | 43 | ||
44 | #define MAX_SURFACES 3 | 44 | #define MAX_SURFACES 3 |
45 | #define MAX_PLANES 6 | 45 | #define MAX_PLANES 6 |
@@ -66,8 +66,27 @@ struct dc_plane_cap { | |||
66 | uint32_t blends_with_above : 1; | 66 | uint32_t blends_with_above : 1; |
67 | uint32_t blends_with_below : 1; | 67 | uint32_t blends_with_below : 1; |
68 | uint32_t per_pixel_alpha : 1; | 68 | uint32_t per_pixel_alpha : 1; |
69 | uint32_t supports_argb8888 : 1; | 69 | struct { |
70 | uint32_t supports_nv12 : 1; | 70 | uint32_t argb8888 : 1; |
71 | uint32_t nv12 : 1; | ||
72 | uint32_t fp16 : 1; | ||
73 | } pixel_format_support; | ||
74 | // max upscaling factor x1000 | ||
75 | // upscaling factors are always >= 1 | ||
76 | // for example, 1080p -> 8K is 4.0, or 4000 raw value | ||
77 | struct { | ||
78 | uint32_t argb8888; | ||
79 | uint32_t nv12; | ||
80 | uint32_t fp16; | ||
81 | } max_upscale_factor; | ||
82 | // max downscale factor x1000 | ||
83 | // downscale factors are always <= 1 | ||
84 | // for example, 8K -> 1080p is 0.25, or 250 raw value | ||
85 | struct { | ||
86 | uint32_t argb8888; | ||
87 | uint32_t nv12; | ||
88 | uint32_t fp16; | ||
89 | } max_downscale_factor; | ||
71 | }; | 90 | }; |
72 | 91 | ||
73 | struct dc_caps { | 92 | struct dc_caps { |
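The new per-format caps express scaling limits as factors x1000: 16000 means up to 16x upscale, 250 means down to 0.25x (4:1 downscale), and 1 effectively means no scaling support for that format. A small hedged sketch of checking a requested ratio against these caps; the helper name is illustrative only:

/* Illustrative helper, not part of this patch.  ratio_x1000 is dst/src * 1000. */
static bool scale_ratio_supported(const struct dc_plane_cap *cap,
                                  unsigned int ratio_x1000, bool is_nv12)
{
        unsigned int max_up   = is_nv12 ? cap->max_upscale_factor.nv12
                                        : cap->max_upscale_factor.argb8888;
        unsigned int max_down = is_nv12 ? cap->max_downscale_factor.nv12
                                        : cap->max_downscale_factor.argb8888;

        /* e.g. DCN10 ARGB8888: 250 <= ratio_x1000 <= 16000 */
        return ratio_x1000 >= max_down && ratio_x1000 <= max_up;
}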
@@ -183,6 +202,7 @@ struct dc_config { | |||
183 | bool disable_disp_pll_sharing; | 202 | bool disable_disp_pll_sharing; |
184 | bool fbc_support; | 203 | bool fbc_support; |
185 | bool optimize_edp_link_rate; | 204 | bool optimize_edp_link_rate; |
205 | bool disable_fractional_pwm; | ||
186 | bool allow_seamless_boot_optimization; | 206 | bool allow_seamless_boot_optimization; |
187 | }; | 207 | }; |
188 | 208 | ||
@@ -226,6 +246,57 @@ struct dc_clocks { | |||
226 | bool p_state_change_support; | 246 | bool p_state_change_support; |
227 | }; | 247 | }; |
228 | 248 | ||
249 | struct dc_bw_validation_profile { | ||
250 | bool enable; | ||
251 | |||
252 | unsigned long long total_ticks; | ||
253 | unsigned long long voltage_level_ticks; | ||
254 | unsigned long long watermark_ticks; | ||
255 | unsigned long long rq_dlg_ticks; | ||
256 | |||
257 | unsigned long long total_count; | ||
258 | unsigned long long skip_fast_count; | ||
259 | unsigned long long skip_pass_count; | ||
260 | unsigned long long skip_fail_count; | ||
261 | }; | ||
262 | |||
263 | #define BW_VAL_TRACE_SETUP() \ | ||
264 | unsigned long long end_tick = 0; \ | ||
265 | unsigned long long voltage_level_tick = 0; \ | ||
266 | unsigned long long watermark_tick = 0; \ | ||
267 | unsigned long long start_tick = dc->debug.bw_val_profile.enable ? \ | ||
268 | dm_get_timestamp(dc->ctx) : 0 | ||
269 | |||
270 | #define BW_VAL_TRACE_COUNT() \ | ||
271 | if (dc->debug.bw_val_profile.enable) \ | ||
272 | dc->debug.bw_val_profile.total_count++ | ||
273 | |||
274 | #define BW_VAL_TRACE_SKIP(status) \ | ||
275 | if (dc->debug.bw_val_profile.enable) { \ | ||
276 | if (!voltage_level_tick) \ | ||
277 | voltage_level_tick = dm_get_timestamp(dc->ctx); \ | ||
278 | dc->debug.bw_val_profile.skip_ ## status ## _count++; \ | ||
279 | } | ||
280 | |||
281 | #define BW_VAL_TRACE_END_VOLTAGE_LEVEL() \ | ||
282 | if (dc->debug.bw_val_profile.enable) \ | ||
283 | voltage_level_tick = dm_get_timestamp(dc->ctx) | ||
284 | |||
285 | #define BW_VAL_TRACE_END_WATERMARKS() \ | ||
286 | if (dc->debug.bw_val_profile.enable) \ | ||
287 | watermark_tick = dm_get_timestamp(dc->ctx) | ||
288 | |||
289 | #define BW_VAL_TRACE_FINISH() \ | ||
290 | if (dc->debug.bw_val_profile.enable) { \ | ||
291 | end_tick = dm_get_timestamp(dc->ctx); \ | ||
292 | dc->debug.bw_val_profile.total_ticks += end_tick - start_tick; \ | ||
293 | dc->debug.bw_val_profile.voltage_level_ticks += voltage_level_tick - start_tick; \ | ||
294 | if (watermark_tick) { \ | ||
295 | dc->debug.bw_val_profile.watermark_ticks += watermark_tick - voltage_level_tick; \ | ||
296 | dc->debug.bw_val_profile.rq_dlg_ticks += end_tick - watermark_tick; \ | ||
297 | } \ | ||
298 | } | ||
299 | |||
229 | struct dc_debug_options { | 300 | struct dc_debug_options { |
230 | enum visual_confirm visual_confirm; | 301 | enum visual_confirm visual_confirm; |
231 | bool sanity_checks; | 302 | bool sanity_checks; |
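The profile is off by default; setting dc->debug.bw_val_profile.enable = true makes every bandwidth-validation call accumulate tick counts for the voltage-level, watermark and rq/dlg stages. A hedged sketch of reading the counters back; dm_get_timestamp() ticks are platform dependent, so only averages and ratios are meaningful, and the DRM_DEBUG reporting below is illustrative:

/* Illustration only: averaging the accumulated counters. */
struct dc_bw_validation_profile *p = &dc->debug.bw_val_profile;

if (p->enable && p->total_count) {
        unsigned long long avg_total = p->total_ticks / p->total_count;
        unsigned long long avg_wm    = p->watermark_ticks / p->total_count;

        DRM_DEBUG("bw validation: %llu runs, avg %llu ticks (%llu in watermarks), "
                  "skipped: %llu fast, %llu fail\n",
                  p->total_count, avg_total, avg_wm,
                  p->skip_fast_count, p->skip_fail_count);
}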
@@ -279,6 +350,7 @@ struct dc_debug_options { | |||
279 | unsigned int force_odm_combine; //bit vector based on otg inst | 350 | unsigned int force_odm_combine; //bit vector based on otg inst |
280 | unsigned int force_fclk_khz; | 351 | unsigned int force_fclk_khz; |
281 | bool disable_tri_buf; | 352 | bool disable_tri_buf; |
353 | struct dc_bw_validation_profile bw_val_profile; | ||
282 | }; | 354 | }; |
283 | 355 | ||
284 | struct dc_debug_data { | 356 | struct dc_debug_data { |
@@ -638,9 +710,14 @@ enum dc_status dc_validate_plane(struct dc *dc, const struct dc_plane_state *pla | |||
638 | 710 | ||
639 | void get_clock_requirements_for_state(struct dc_state *state, struct AsicStateEx *info); | 711 | void get_clock_requirements_for_state(struct dc_state *state, struct AsicStateEx *info); |
640 | 712 | ||
713 | /* | ||
714 | * fast_validate: we return after determining if we can support the new state, | ||
715 | * but before we populate the programming info | ||
716 | */ | ||
641 | enum dc_status dc_validate_global_state( | 717 | enum dc_status dc_validate_global_state( |
642 | struct dc *dc, | 718 | struct dc *dc, |
643 | struct dc_state *new_ctx); | 719 | struct dc_state *new_ctx, |
720 | bool fast_validate); | ||
644 | 721 | ||
645 | 722 | ||
646 | void dc_resource_state_construct( | 723 | void dc_resource_state_construct( |
diff --git a/drivers/gpu/drm/amd/display/dc/dc_link.h b/drivers/gpu/drm/amd/display/dc/dc_link.h index 4e26d6e93b31..cc7ffac64c96 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_link.h +++ b/drivers/gpu/drm/amd/display/dc/dc_link.h | |||
@@ -250,7 +250,7 @@ uint32_t dc_link_bandwidth_kbps( | |||
250 | const struct dc_link *link, | 250 | const struct dc_link *link, |
251 | const struct dc_link_settings *link_setting); | 251 | const struct dc_link_settings *link_setting); |
252 | 252 | ||
253 | const struct dc_link_settings *dc_link_get_verified_link_cap( | 253 | const struct dc_link_settings *dc_link_get_link_cap( |
254 | const struct dc_link *link); | 254 | const struct dc_link *link); |
255 | 255 | ||
256 | bool dc_submit_i2c( | 256 | bool dc_submit_i2c( |
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_abm.c b/drivers/gpu/drm/amd/display/dc/dce/dce_abm.c index da96229db53a..855360b1414f 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_abm.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_abm.c | |||
@@ -50,6 +50,7 @@ | |||
50 | #define MCP_ABM_LEVEL_SET 0x65 | 50 | #define MCP_ABM_LEVEL_SET 0x65 |
51 | #define MCP_ABM_PIPE_SET 0x66 | 51 | #define MCP_ABM_PIPE_SET 0x66 |
52 | #define MCP_BL_SET 0x67 | 52 | #define MCP_BL_SET 0x67 |
53 | #define MCP_BL_SET_PWM_FRAC 0x6A /* Enable or disable Fractional PWM */ | ||
53 | 54 | ||
54 | #define MCP_DISABLE_ABM_IMMEDIATELY 255 | 55 | #define MCP_DISABLE_ABM_IMMEDIATELY 255 |
55 | 56 | ||
@@ -390,6 +391,23 @@ static bool dce_abm_init_backlight(struct abm *abm) | |||
390 | REG_UPDATE(BL_PWM_GRP1_REG_LOCK, | 391 | REG_UPDATE(BL_PWM_GRP1_REG_LOCK, |
391 | BL_PWM_GRP1_REG_LOCK, 0); | 392 | BL_PWM_GRP1_REG_LOCK, 0); |
392 | 393 | ||
394 | /* Wait until microcontroller is ready to process interrupt */ | ||
395 | REG_WAIT(MASTER_COMM_CNTL_REG, MASTER_COMM_INTERRUPT, 0, 100, 800); | ||
396 | |||
397 | /* Set PWM fractional enable/disable */ | ||
398 | value = (abm->ctx->dc->config.disable_fractional_pwm == false) ? 1 : 0; | ||
399 | REG_WRITE(MASTER_COMM_DATA_REG1, value); | ||
400 | |||
401 | /* Set command to enable or disable fractional PWM microcontroller */ | ||
402 | REG_UPDATE(MASTER_COMM_CMD_REG, MASTER_COMM_CMD_REG_BYTE0, | ||
403 | MCP_BL_SET_PWM_FRAC); | ||
404 | |||
405 | /* Notify microcontroller of new command */ | ||
406 | REG_UPDATE(MASTER_COMM_CNTL_REG, MASTER_COMM_INTERRUPT, 1); | ||
407 | |||
408 | /* Ensure command has been executed before continuing */ | ||
409 | REG_WAIT(MASTER_COMM_CNTL_REG, MASTER_COMM_INTERRUPT, 0, 100, 800); | ||
410 | |||
393 | return true; | 411 | return true; |
394 | } | 412 | } |
395 | 413 | ||
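Backlight init now also tells the DMCU microcontroller whether fractional PWM should be used, using the same mailbox handshake as the other MCP_* commands: wait for the interrupt bit to clear, write the payload to MASTER_COMM_DATA_REG1, write the command byte, raise MASTER_COMM_INTERRUPT, then wait for it to clear again. A hedged generic sketch of that pattern; the helper itself is not part of this patch:

/* Illustrative mailbox helper following the sequence added above. */
static void dmcu_send_mcp_command(struct dce_abm *abm_dce,
                                  unsigned int cmd, unsigned int data)
{
        /* microcontroller must be idle before accepting a new command */
        REG_WAIT(MASTER_COMM_CNTL_REG, MASTER_COMM_INTERRUPT, 0, 100, 800);

        REG_WRITE(MASTER_COMM_DATA_REG1, data);
        REG_UPDATE(MASTER_COMM_CMD_REG, MASTER_COMM_CMD_REG_BYTE0, cmd);

        /* kick the microcontroller and wait for completion */
        REG_UPDATE(MASTER_COMM_CNTL_REG, MASTER_COMM_INTERRUPT, 1);
        REG_WAIT(MASTER_COMM_CNTL_REG, MASTER_COMM_INTERRUPT, 0, 100, 800);
}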
diff --git a/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c b/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c index 767d37c6d942..e938bf9986d3 100644 --- a/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c | |||
@@ -380,7 +380,24 @@ static const struct resource_caps res_cap = { | |||
380 | 380 | ||
381 | static const struct dc_plane_cap plane_cap = { | 381 | static const struct dc_plane_cap plane_cap = { |
382 | .type = DC_PLANE_TYPE_DCE_RGB, | 382 | .type = DC_PLANE_TYPE_DCE_RGB, |
383 | .supports_argb8888 = true, | 383 | |
384 | .pixel_format_support = { | ||
385 | .argb8888 = true, | ||
386 | .nv12 = false, | ||
387 | .fp16 = false | ||
388 | }, | ||
389 | |||
390 | .max_upscale_factor = { | ||
391 | .argb8888 = 16000, | ||
392 | .nv12 = 1, | ||
393 | .fp16 = 1 | ||
394 | }, | ||
395 | |||
396 | .max_downscale_factor = { | ||
397 | .argb8888 = 250, | ||
398 | .nv12 = 1, | ||
399 | .fp16 = 1 | ||
400 | } | ||
384 | }; | 401 | }; |
385 | 402 | ||
386 | #define CTX ctx | 403 | #define CTX ctx |
@@ -761,7 +778,8 @@ static enum dc_status build_mapped_resource( | |||
761 | 778 | ||
762 | bool dce100_validate_bandwidth( | 779 | bool dce100_validate_bandwidth( |
763 | struct dc *dc, | 780 | struct dc *dc, |
764 | struct dc_state *context) | 781 | struct dc_state *context, |
782 | bool fast_validate) | ||
765 | { | 783 | { |
766 | int i; | 784 | int i; |
767 | bool at_least_one_pipe = false; | 785 | bool at_least_one_pipe = false; |
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c index 7c4914b2b524..dcd04e9ea76b 100644 --- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c | |||
@@ -397,14 +397,48 @@ static const struct dc_plane_cap plane_cap = { | |||
397 | .blends_with_below = true, | 397 | .blends_with_below = true, |
398 | .blends_with_above = true, | 398 | .blends_with_above = true, |
399 | .per_pixel_alpha = 1, | 399 | .per_pixel_alpha = 1, |
400 | .supports_argb8888 = true, | 400 | |
401 | .pixel_format_support = { | ||
402 | .argb8888 = true, | ||
403 | .nv12 = false, | ||
404 | .fp16 = false | ||
405 | }, | ||
406 | |||
407 | .max_upscale_factor = { | ||
408 | .argb8888 = 16000, | ||
409 | .nv12 = 1, | ||
410 | .fp16 = 1 | ||
411 | }, | ||
412 | |||
413 | .max_downscale_factor = { | ||
414 | .argb8888 = 250, | ||
415 | .nv12 = 1, | ||
416 | .fp16 = 1 | ||
417 | } | ||
401 | }; | 418 | }; |
402 | 419 | ||
403 | static const struct dc_plane_cap underlay_plane_cap = { | 420 | static const struct dc_plane_cap underlay_plane_cap = { |
404 | .type = DC_PLANE_TYPE_DCE_UNDERLAY, | 421 | .type = DC_PLANE_TYPE_DCE_UNDERLAY, |
405 | .blends_with_above = true, | 422 | .blends_with_above = true, |
406 | .per_pixel_alpha = 1, | 423 | .per_pixel_alpha = 1, |
407 | .supports_nv12 = true | 424 | |
425 | .pixel_format_support = { | ||
426 | .argb8888 = false, | ||
427 | .nv12 = true, | ||
428 | .fp16 = false | ||
429 | }, | ||
430 | |||
431 | .max_upscale_factor = { | ||
432 | .argb8888 = 1, | ||
433 | .nv12 = 16000, | ||
434 | .fp16 = 1 | ||
435 | }, | ||
436 | |||
437 | .max_downscale_factor = { | ||
438 | .argb8888 = 1, | ||
439 | .nv12 = 250, | ||
440 | .fp16 = 1 | ||
441 | } | ||
408 | }; | 442 | }; |
409 | 443 | ||
410 | #define CTX ctx | 444 | #define CTX ctx |
@@ -869,7 +903,8 @@ static enum dc_status build_mapped_resource( | |||
869 | 903 | ||
870 | static bool dce110_validate_bandwidth( | 904 | static bool dce110_validate_bandwidth( |
871 | struct dc *dc, | 905 | struct dc *dc, |
872 | struct dc_state *context) | 906 | struct dc_state *context, |
907 | bool fast_validate) | ||
873 | { | 908 | { |
874 | bool result = false; | 909 | bool result = false; |
875 | 910 | ||
diff --git a/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c b/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c index 2f28a74383f5..a480b15f6885 100644 --- a/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c | |||
@@ -399,7 +399,24 @@ static const struct resource_caps polaris_11_resource_cap = { | |||
399 | 399 | ||
400 | static const struct dc_plane_cap plane_cap = { | 400 | static const struct dc_plane_cap plane_cap = { |
401 | .type = DC_PLANE_TYPE_DCE_RGB, | 401 | .type = DC_PLANE_TYPE_DCE_RGB, |
402 | .supports_argb8888 = true, | 402 | |
403 | .pixel_format_support = { | ||
404 | .argb8888 = true, | ||
405 | .nv12 = false, | ||
406 | .fp16 = false | ||
407 | }, | ||
408 | |||
409 | .max_upscale_factor = { | ||
410 | .argb8888 = 16000, | ||
411 | .nv12 = 1, | ||
412 | .fp16 = 1 | ||
413 | }, | ||
414 | |||
415 | .max_downscale_factor = { | ||
416 | .argb8888 = 250, | ||
417 | .nv12 = 1, | ||
418 | .fp16 = 1 | ||
419 | } | ||
403 | }; | 420 | }; |
404 | 421 | ||
405 | #define CTX ctx | 422 | #define CTX ctx |
@@ -809,7 +826,8 @@ static enum dc_status build_mapped_resource( | |||
809 | 826 | ||
810 | bool dce112_validate_bandwidth( | 827 | bool dce112_validate_bandwidth( |
811 | struct dc *dc, | 828 | struct dc *dc, |
812 | struct dc_state *context) | 829 | struct dc_state *context, |
830 | bool fast_validate) | ||
813 | { | 831 | { |
814 | bool result = false; | 832 | bool result = false; |
815 | 833 | ||
diff --git a/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.h b/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.h index 95a403396219..1f57ebc6f9b4 100644 --- a/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.h +++ b/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.h | |||
@@ -44,7 +44,8 @@ enum dc_status dce112_validate_with_context( | |||
44 | 44 | ||
45 | bool dce112_validate_bandwidth( | 45 | bool dce112_validate_bandwidth( |
46 | struct dc *dc, | 46 | struct dc *dc, |
47 | struct dc_state *context); | 47 | struct dc_state *context, |
48 | bool fast_validate); | ||
48 | 49 | ||
49 | enum dc_status dce112_add_stream_to_ctx( | 50 | enum dc_status dce112_add_stream_to_ctx( |
50 | struct dc *dc, | 51 | struct dc *dc, |
diff --git a/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c b/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c index 01ea503faa12..6d49c7143c67 100644 --- a/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c | |||
@@ -456,7 +456,24 @@ static const struct resource_caps res_cap = { | |||
456 | 456 | ||
457 | static const struct dc_plane_cap plane_cap = { | 457 | static const struct dc_plane_cap plane_cap = { |
458 | .type = DC_PLANE_TYPE_DCE_RGB, | 458 | .type = DC_PLANE_TYPE_DCE_RGB, |
459 | .supports_argb8888 = true, | 459 | |
460 | .pixel_format_support = { | ||
461 | .argb8888 = true, | ||
462 | .nv12 = false, | ||
463 | .fp16 = false | ||
464 | }, | ||
465 | |||
466 | .max_upscale_factor = { | ||
467 | .argb8888 = 16000, | ||
468 | .nv12 = 1, | ||
469 | .fp16 = 1 | ||
470 | }, | ||
471 | |||
472 | .max_downscale_factor = { | ||
473 | .argb8888 = 250, | ||
474 | .nv12 = 1, | ||
475 | .fp16 = 1 | ||
476 | } | ||
460 | }; | 477 | }; |
461 | 478 | ||
462 | static const struct dc_debug_options debug_defaults = { | 479 | static const struct dc_debug_options debug_defaults = { |
diff --git a/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c b/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c index c7899ec96287..27d0cc394963 100644 --- a/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c | |||
@@ -389,7 +389,24 @@ static const struct resource_caps res_cap_83 = { | |||
389 | 389 | ||
390 | static const struct dc_plane_cap plane_cap = { | 390 | static const struct dc_plane_cap plane_cap = { |
391 | .type = DC_PLANE_TYPE_DCE_RGB, | 391 | .type = DC_PLANE_TYPE_DCE_RGB, |
392 | .supports_argb8888 = true, | 392 | |
393 | .pixel_format_support = { | ||
394 | .argb8888 = true, | ||
395 | .nv12 = false, | ||
396 | .fp16 = false | ||
397 | }, | ||
398 | |||
399 | .max_upscale_factor = { | ||
400 | .argb8888 = 16000, | ||
401 | .nv12 = 1, | ||
402 | .fp16 = 1 | ||
403 | }, | ||
404 | |||
405 | .max_downscale_factor = { | ||
406 | .argb8888 = 250, | ||
407 | .nv12 = 1, | ||
408 | .fp16 = 1 | ||
409 | } | ||
393 | }; | 410 | }; |
394 | 411 | ||
395 | static const struct dce_dmcu_registers dmcu_regs = { | 412 | static const struct dce_dmcu_registers dmcu_regs = { |
@@ -795,7 +812,8 @@ static void destruct(struct dce110_resource_pool *pool) | |||
795 | 812 | ||
796 | bool dce80_validate_bandwidth( | 813 | bool dce80_validate_bandwidth( |
797 | struct dc *dc, | 814 | struct dc *dc, |
798 | struct dc_state *context) | 815 | struct dc_state *context, |
816 | bool fast_validate) | ||
799 | { | 817 | { |
800 | int i; | 818 | int i; |
801 | bool at_least_one_pipe = false; | 819 | bool at_least_one_pipe = false; |
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h index 3268ab089363..db98ba361686 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h | |||
@@ -247,7 +247,7 @@ | |||
247 | .field_name = reg_name ## __ ## field_name ## post_fix | 247 | .field_name = reg_name ## __ ## field_name ## post_fix |
248 | 248 | ||
249 | /* Mask/shift struct generation macro for all ASICs (including those with reduced functionality) */ | 249 | /* Mask/shift struct generation macro for all ASICs (including those with reduced functionality) */ |
250 | #define HUBP_MASK_SH_LIST_DCN(mask_sh)\ | 250 | #define HUBP_MASK_SH_LIST_DCN_COMMON(mask_sh)\ |
251 | HUBP_SF(HUBP0_DCHUBP_CNTL, HUBP_BLANK_EN, mask_sh),\ | 251 | HUBP_SF(HUBP0_DCHUBP_CNTL, HUBP_BLANK_EN, mask_sh),\ |
252 | HUBP_SF(HUBP0_DCHUBP_CNTL, HUBP_TTU_DISABLE, mask_sh),\ | 252 | HUBP_SF(HUBP0_DCHUBP_CNTL, HUBP_TTU_DISABLE, mask_sh),\ |
253 | HUBP_SF(HUBP0_DCHUBP_CNTL, HUBP_UNDERFLOW_STATUS, mask_sh),\ | 253 | HUBP_SF(HUBP0_DCHUBP_CNTL, HUBP_UNDERFLOW_STATUS, mask_sh),\ |
@@ -331,7 +331,6 @@ | |||
331 | HUBP_SF(HUBP0_DCHUBP_REQ_SIZE_CONFIG, META_CHUNK_SIZE, mask_sh),\ | 331 | HUBP_SF(HUBP0_DCHUBP_REQ_SIZE_CONFIG, META_CHUNK_SIZE, mask_sh),\ |
332 | HUBP_SF(HUBP0_DCHUBP_REQ_SIZE_CONFIG, MIN_META_CHUNK_SIZE, mask_sh),\ | 332 | HUBP_SF(HUBP0_DCHUBP_REQ_SIZE_CONFIG, MIN_META_CHUNK_SIZE, mask_sh),\ |
333 | HUBP_SF(HUBP0_DCHUBP_REQ_SIZE_CONFIG, DPTE_GROUP_SIZE, mask_sh),\ | 333 | HUBP_SF(HUBP0_DCHUBP_REQ_SIZE_CONFIG, DPTE_GROUP_SIZE, mask_sh),\ |
334 | HUBP_SF(HUBP0_DCHUBP_REQ_SIZE_CONFIG, MPTE_GROUP_SIZE, mask_sh),\ | ||
335 | HUBP_SF(HUBP0_DCHUBP_REQ_SIZE_CONFIG, SWATH_HEIGHT, mask_sh),\ | 334 | HUBP_SF(HUBP0_DCHUBP_REQ_SIZE_CONFIG, SWATH_HEIGHT, mask_sh),\ |
336 | HUBP_SF(HUBP0_DCHUBP_REQ_SIZE_CONFIG, PTE_ROW_HEIGHT_LINEAR, mask_sh),\ | 335 | HUBP_SF(HUBP0_DCHUBP_REQ_SIZE_CONFIG, PTE_ROW_HEIGHT_LINEAR, mask_sh),\ |
337 | HUBP_SF(HUBP0_DCHUBP_REQ_SIZE_CONFIG_C, CHUNK_SIZE_C, mask_sh),\ | 336 | HUBP_SF(HUBP0_DCHUBP_REQ_SIZE_CONFIG_C, CHUNK_SIZE_C, mask_sh),\ |
@@ -339,7 +338,6 @@ | |||
339 | HUBP_SF(HUBP0_DCHUBP_REQ_SIZE_CONFIG_C, META_CHUNK_SIZE_C, mask_sh),\ | 338 | HUBP_SF(HUBP0_DCHUBP_REQ_SIZE_CONFIG_C, META_CHUNK_SIZE_C, mask_sh),\ |
340 | HUBP_SF(HUBP0_DCHUBP_REQ_SIZE_CONFIG_C, MIN_META_CHUNK_SIZE_C, mask_sh),\ | 339 | HUBP_SF(HUBP0_DCHUBP_REQ_SIZE_CONFIG_C, MIN_META_CHUNK_SIZE_C, mask_sh),\ |
341 | HUBP_SF(HUBP0_DCHUBP_REQ_SIZE_CONFIG_C, DPTE_GROUP_SIZE_C, mask_sh),\ | 340 | HUBP_SF(HUBP0_DCHUBP_REQ_SIZE_CONFIG_C, DPTE_GROUP_SIZE_C, mask_sh),\ |
342 | HUBP_SF(HUBP0_DCHUBP_REQ_SIZE_CONFIG_C, MPTE_GROUP_SIZE_C, mask_sh),\ | ||
343 | HUBP_SF(HUBP0_DCHUBP_REQ_SIZE_CONFIG_C, SWATH_HEIGHT_C, mask_sh),\ | 341 | HUBP_SF(HUBP0_DCHUBP_REQ_SIZE_CONFIG_C, SWATH_HEIGHT_C, mask_sh),\ |
344 | HUBP_SF(HUBP0_DCHUBP_REQ_SIZE_CONFIG_C, PTE_ROW_HEIGHT_LINEAR_C, mask_sh),\ | 342 | HUBP_SF(HUBP0_DCHUBP_REQ_SIZE_CONFIG_C, PTE_ROW_HEIGHT_LINEAR_C, mask_sh),\ |
345 | HUBP_SF(HUBPREQ0_BLANK_OFFSET_0, REFCYC_H_BLANK_END, mask_sh),\ | 343 | HUBP_SF(HUBPREQ0_BLANK_OFFSET_0, REFCYC_H_BLANK_END, mask_sh),\ |
@@ -373,6 +371,11 @@ | |||
373 | HUBP_SF(HUBPREQ0_DCN_SURF0_TTU_CNTL1, REFCYC_PER_REQ_DELIVERY_PRE, mask_sh),\ | 371 | HUBP_SF(HUBPREQ0_DCN_SURF0_TTU_CNTL1, REFCYC_PER_REQ_DELIVERY_PRE, mask_sh),\ |
374 | HUBP_SF(HUBP0_HUBP_CLK_CNTL, HUBP_CLOCK_ENABLE, mask_sh) | 372 | HUBP_SF(HUBP0_HUBP_CLK_CNTL, HUBP_CLOCK_ENABLE, mask_sh) |
375 | 373 | ||
374 | #define HUBP_MASK_SH_LIST_DCN(mask_sh)\ | ||
375 | HUBP_MASK_SH_LIST_DCN_COMMON(mask_sh),\ | ||
376 | HUBP_SF(HUBP0_DCHUBP_REQ_SIZE_CONFIG, MPTE_GROUP_SIZE, mask_sh),\ | ||
377 | HUBP_SF(HUBP0_DCHUBP_REQ_SIZE_CONFIG_C, MPTE_GROUP_SIZE_C, mask_sh) | ||
378 | |||
376 | /* Mask/shift struct generation macro for ASICs with VM */ | 379 | /* Mask/shift struct generation macro for ASICs with VM */ |
377 | #define HUBP_MASK_SH_LIST_DCN_VM(mask_sh)\ | 380 | #define HUBP_MASK_SH_LIST_DCN_VM(mask_sh)\ |
378 | HUBP_SF(HUBPREQ0_NOM_PARAMETERS_0, DST_Y_PER_PTE_ROW_NOM_L, mask_sh),\ | 381 | HUBP_SF(HUBPREQ0_NOM_PARAMETERS_0, DST_Y_PER_PTE_ROW_NOM_L, mask_sh),\ |
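Carving the shared fields into HUBP_MASK_SH_LIST_DCN_COMMON lets ASIC variants without the MPTE_GROUP_SIZE registers reuse the common list, while DCN1 keeps the full HUBP_MASK_SH_LIST_DCN. A hedged sketch of how such a reduced variant would be composed; the macro name below is hypothetical:

/* Hypothetical list for a part lacking the MPTE_GROUP_SIZE/_C fields. */
#define HUBP_MASK_SH_LIST_DCN_REDUCED(mask_sh)\
        HUBP_MASK_SH_LIST_DCN_COMMON(mask_sh)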
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c index 79f4fbb8a145..7eccb54c421d 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c | |||
@@ -521,8 +521,24 @@ static const struct dc_plane_cap plane_cap = { | |||
521 | .blends_with_above = true, | 521 | .blends_with_above = true, |
522 | .blends_with_below = true, | 522 | .blends_with_below = true, |
523 | .per_pixel_alpha = true, | 523 | .per_pixel_alpha = true, |
524 | .supports_argb8888 = true, | 524 | |
525 | .supports_nv12 = true | 525 | .pixel_format_support = { |
526 | .argb8888 = true, | ||
527 | .nv12 = true, | ||
528 | .fp16 = true | ||
529 | }, | ||
530 | |||
531 | .max_upscale_factor = { | ||
532 | .argb8888 = 16000, | ||
533 | .nv12 = 16000, | ||
534 | .fp16 = 1 | ||
535 | }, | ||
536 | |||
537 | .max_downscale_factor = { | ||
538 | .argb8888 = 250, | ||
539 | .nv12 = 250, | ||
540 | .fp16 = 1 | ||
541 | } | ||
526 | }; | 542 | }; |
527 | 543 | ||
528 | static const struct dc_debug_options debug_defaults_drv = { | 544 | static const struct dc_debug_options debug_defaults_drv = { |
diff --git a/drivers/gpu/drm/amd/display/dc/inc/core_types.h b/drivers/gpu/drm/amd/display/dc/inc/core_types.h index 827541e34ee2..88a82a23d259 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/core_types.h +++ b/drivers/gpu/drm/amd/display/dc/inc/core_types.h | |||
@@ -97,7 +97,8 @@ struct resource_funcs { | |||
97 | const struct encoder_init_data *init); | 97 | const struct encoder_init_data *init); |
98 | bool (*validate_bandwidth)( | 98 | bool (*validate_bandwidth)( |
99 | struct dc *dc, | 99 | struct dc *dc, |
100 | struct dc_state *context); | 100 | struct dc_state *context, |
101 | bool fast_validate); | ||
101 | 102 | ||
102 | enum dc_status (*validate_global)( | 103 | enum dc_status (*validate_global)( |
103 | struct dc *dc, | 104 | struct dc *dc, |
diff --git a/drivers/gpu/drm/amd/display/dc/inc/dcn_calcs.h b/drivers/gpu/drm/amd/display/dc/inc/dcn_calcs.h index 86ec3f69c765..263c09630c06 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/dcn_calcs.h +++ b/drivers/gpu/drm/amd/display/dc/inc/dcn_calcs.h | |||
@@ -621,7 +621,8 @@ extern const struct dcn_ip_params dcn10_ip_defaults; | |||
621 | 621 | ||
622 | bool dcn_validate_bandwidth( | 622 | bool dcn_validate_bandwidth( |
623 | struct dc *dc, | 623 | struct dc *dc, |
624 | struct dc_state *context); | 624 | struct dc_state *context, |
625 | bool fast_validate); | ||
625 | 626 | ||
626 | unsigned int dcn_find_dcfclk_suits_all( | 627 | unsigned int dcn_find_dcfclk_suits_all( |
627 | const struct dc *dc, | 628 | const struct dc *dc, |
diff --git a/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h b/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h index 26a7d2c7f4fa..c8b168b3413b 100644 --- a/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h +++ b/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h | |||
@@ -540,6 +540,8 @@ struct smu_funcs | |||
540 | int (*get_fan_speed_percent)(struct smu_context *smu, uint32_t *speed); | 540 | int (*get_fan_speed_percent)(struct smu_context *smu, uint32_t *speed); |
541 | int (*set_fan_speed_percent)(struct smu_context *smu, uint32_t speed); | 541 | int (*set_fan_speed_percent)(struct smu_context *smu, uint32_t speed); |
542 | int (*set_fan_speed_rpm)(struct smu_context *smu, uint32_t speed); | 542 | int (*set_fan_speed_rpm)(struct smu_context *smu, uint32_t speed); |
543 | int (*set_xgmi_pstate)(struct smu_context *smu, uint32_t pstate); | ||
544 | |||
543 | }; | 545 | }; |
544 | 546 | ||
545 | #define smu_init_microcode(smu) \ | 547 | #define smu_init_microcode(smu) \ |
@@ -723,6 +725,8 @@ struct smu_funcs | |||
723 | ((smu)->funcs->get_sclk ? (smu)->funcs->get_sclk((smu), (low)) : 0) | 725 | ((smu)->funcs->get_sclk ? (smu)->funcs->get_sclk((smu), (low)) : 0) |
724 | #define smu_get_mclk(smu, low) \ | 726 | #define smu_get_mclk(smu, low) \ |
725 | ((smu)->funcs->get_mclk ? (smu)->funcs->get_mclk((smu), (low)) : 0) | 727 | ((smu)->funcs->get_mclk ? (smu)->funcs->get_mclk((smu), (low)) : 0) |
728 | #define smu_set_xgmi_pstate(smu, pstate) \ | ||
729 | ((smu)->funcs->set_xgmi_pstate ? (smu)->funcs->set_xgmi_pstate((smu), (pstate)) : 0) | ||
726 | 730 | ||
727 | 731 | ||
728 | extern int smu_get_atom_data_table(struct smu_context *smu, uint32_t table, | 732 | extern int smu_get_atom_data_table(struct smu_context *smu, uint32_t table, |
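Like the other smu_* wrappers, the new smu_set_xgmi_pstate() macro is null-safe: if an SMU implementation never fills in the callback, the call collapses to 0. A hedged sketch of a caller; the function and variable names below are assumptions, and the actual xgmi pstate plumbing is outside this section:

/* Illustrative caller only. */
static int example_set_xgmi_pstate(struct amdgpu_device *adev, uint32_t pstate)
{
        struct smu_context *smu = &adev->smu;
        int ret;

        /* expands to smu->funcs->set_xgmi_pstate(smu, pstate) when the
         * callback is implemented (e.g. smu_v11_0), and to 0 otherwise
         */
        ret = smu_set_xgmi_pstate(smu, pstate);
        if (ret)
                dev_err(adev->dev, "failed to set xgmi pstate %u\n", pstate);

        return ret;
}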
diff --git a/drivers/gpu/drm/amd/powerplay/smu_v11_0.c b/drivers/gpu/drm/amd/powerplay/smu_v11_0.c index d2e2a4e2d0eb..c478b38662d0 100644 --- a/drivers/gpu/drm/amd/powerplay/smu_v11_0.c +++ b/drivers/gpu/drm/amd/powerplay/smu_v11_0.c | |||
@@ -1893,6 +1893,13 @@ set_fan_speed_rpm_failed: | |||
1893 | return ret; | 1893 | return ret; |
1894 | } | 1894 | } |
1895 | 1895 | ||
1896 | static int smu_v11_0_set_xgmi_pstate(struct smu_context *smu, | ||
1897 | uint32_t pstate) | ||
1898 | { | ||
1899 | /* send msg to SMU to set pstate */ | ||
1900 | return 0; | ||
1901 | } | ||
1902 | |||
1896 | static const struct smu_funcs smu_v11_0_funcs = { | 1903 | static const struct smu_funcs smu_v11_0_funcs = { |
1897 | .init_microcode = smu_v11_0_init_microcode, | 1904 | .init_microcode = smu_v11_0_init_microcode, |
1898 | .load_microcode = smu_v11_0_load_microcode, | 1905 | .load_microcode = smu_v11_0_load_microcode, |
@@ -1947,6 +1954,7 @@ static const struct smu_funcs smu_v11_0_funcs = { | |||
1947 | .get_fan_speed_percent = smu_v11_0_get_fan_speed_percent, | 1954 | .get_fan_speed_percent = smu_v11_0_get_fan_speed_percent, |
1948 | .set_fan_speed_percent = smu_v11_0_set_fan_speed_percent, | 1955 | .set_fan_speed_percent = smu_v11_0_set_fan_speed_percent, |
1949 | .set_fan_speed_rpm = smu_v11_0_set_fan_speed_rpm, | 1956 | .set_fan_speed_rpm = smu_v11_0_set_fan_speed_rpm, |
1957 | .set_xgmi_pstate = smu_v11_0_set_xgmi_pstate, | ||
1950 | }; | 1958 | }; |
1951 | 1959 | ||
1952 | void smu_v11_0_set_smu_funcs(struct smu_context *smu) | 1960 | void smu_v11_0_set_smu_funcs(struct smu_context *smu) |
@@ -1954,7 +1962,6 @@ void smu_v11_0_set_smu_funcs(struct smu_context *smu) | |||
1954 | struct amdgpu_device *adev = smu->adev; | 1962 | struct amdgpu_device *adev = smu->adev; |
1955 | 1963 | ||
1956 | smu->funcs = &smu_v11_0_funcs; | 1964 | smu->funcs = &smu_v11_0_funcs; |
1957 | |||
1958 | switch (adev->asic_type) { | 1965 | switch (adev->asic_type) { |
1959 | case CHIP_VEGA20: | 1966 | case CHIP_VEGA20: |
1960 | vega20_set_ppt_funcs(smu); | 1967 | vega20_set_ppt_funcs(smu); |
diff --git a/include/uapi/drm/amdgpu_drm.h b/include/uapi/drm/amdgpu_drm.h index e3a97da4add9..4788730dbe78 100644 --- a/include/uapi/drm/amdgpu_drm.h +++ b/include/uapi/drm/amdgpu_drm.h | |||
@@ -528,6 +528,8 @@ struct drm_amdgpu_gem_va { | |||
528 | #define AMDGPU_CHUNK_ID_SYNCOBJ_OUT 0x05 | 528 | #define AMDGPU_CHUNK_ID_SYNCOBJ_OUT 0x05 |
529 | #define AMDGPU_CHUNK_ID_BO_HANDLES 0x06 | 529 | #define AMDGPU_CHUNK_ID_BO_HANDLES 0x06 |
530 | #define AMDGPU_CHUNK_ID_SCHEDULED_DEPENDENCIES 0x07 | 530 | #define AMDGPU_CHUNK_ID_SCHEDULED_DEPENDENCIES 0x07 |
531 | #define AMDGPU_CHUNK_ID_SYNCOBJ_TIMELINE_WAIT 0x08 | ||
532 | #define AMDGPU_CHUNK_ID_SYNCOBJ_TIMELINE_SIGNAL 0x09 | ||
531 | 533 | ||
532 | struct drm_amdgpu_cs_chunk { | 534 | struct drm_amdgpu_cs_chunk { |
533 | __u32 chunk_id; | 535 | __u32 chunk_id; |
@@ -608,6 +610,12 @@ struct drm_amdgpu_cs_chunk_sem { | |||
608 | __u32 handle; | 610 | __u32 handle; |
609 | }; | 611 | }; |
610 | 612 | ||
613 | struct drm_amdgpu_cs_chunk_syncobj { | ||
614 | __u32 handle; | ||
615 | __u32 flags; | ||
616 | __u64 point; | ||
617 | }; | ||
618 | |||
611 | #define AMDGPU_FENCE_TO_HANDLE_GET_SYNCOBJ 0 | 619 | #define AMDGPU_FENCE_TO_HANDLE_GET_SYNCOBJ 0 |
612 | #define AMDGPU_FENCE_TO_HANDLE_GET_SYNCOBJ_FD 1 | 620 | #define AMDGPU_FENCE_TO_HANDLE_GET_SYNCOBJ_FD 1 |
613 | #define AMDGPU_FENCE_TO_HANDLE_GET_SYNC_FILE_FD 2 | 621 | #define AMDGPU_FENCE_TO_HANDLE_GET_SYNC_FILE_FD 2 |
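The two new chunk IDs let userspace wait on and signal specific points of a timeline syncobj as part of a CS submission; each chunk carries an array of drm_amdgpu_cs_chunk_syncobj entries instead of the plain drm_amdgpu_cs_chunk_sem. A hedged userspace sketch of filling in one signal chunk; timeline_syncobj_handle and the chosen point are assumptions, and the surrounding submission code is omitted:

/* Illustrative userspace fragment; only the chunk wiring is shown. */
struct drm_amdgpu_cs_chunk_syncobj signal = {
        .handle = timeline_syncobj_handle,   /* from DRM_IOCTL_SYNCOBJ_CREATE */
        .flags  = 0,
        .point  = 42,                        /* timeline point to signal */
};

struct drm_amdgpu_cs_chunk chunk = {
        .chunk_id   = AMDGPU_CHUNK_ID_SYNCOBJ_TIMELINE_SIGNAL,
        .length_dw  = sizeof(signal) / 4,
        .chunk_data = (uintptr_t)&signal,
};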