-rw-r--r--  drivers/dma-buf/dma-fence.c                            | 17
-rw-r--r--  drivers/dma-buf/sync_debug.c                           |  2
-rw-r--r--  drivers/dma-buf/sync_file.c                            |  8
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c             |  3
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_device.c                |  4
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c  |  7
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h  |  1
-rw-r--r--  drivers/gpu/drm/amd/include/kgd_kfd_interface.h        |  3
-rw-r--r--  drivers/gpu/drm/drm_dp_mst_topology.c                  | 41
-rw-r--r--  drivers/gpu/drm/imx/ipuv3-plane.c                      |  2
-rw-r--r--  drivers/gpu/drm/imx/parallel-display.c                 |  2
-rw-r--r--  drivers/gpu/drm/radeon/radeon_kfd.c                    |  1
-rw-r--r--  drivers/gpu/drm/vc4/vc4_crtc.c                         | 66
-rw-r--r--  include/linux/dma-fence.h                              |  2
14 files changed, 95 insertions(+), 64 deletions(-)
diff --git a/drivers/dma-buf/dma-fence.c b/drivers/dma-buf/dma-fence.c
index 57da14c15987..56e0a0e1b600 100644
--- a/drivers/dma-buf/dma-fence.c
+++ b/drivers/dma-buf/dma-fence.c
@@ -75,11 +75,6 @@ int dma_fence_signal_locked(struct dma_fence *fence)
 	if (WARN_ON(!fence))
 		return -EINVAL;
 
-	if (!ktime_to_ns(fence->timestamp)) {
-		fence->timestamp = ktime_get();
-		smp_mb__before_atomic();
-	}
-
 	if (test_and_set_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) {
 		ret = -EINVAL;
 
@@ -87,8 +82,11 @@ int dma_fence_signal_locked(struct dma_fence *fence)
 		 * we might have raced with the unlocked dma_fence_signal,
 		 * still run through all callbacks
 		 */
-	} else
+	} else {
+		fence->timestamp = ktime_get();
+		set_bit(DMA_FENCE_FLAG_TIMESTAMP_BIT, &fence->flags);
 		trace_dma_fence_signaled(fence);
+	}
 
 	list_for_each_entry_safe(cur, tmp, &fence->cb_list, node) {
 		list_del_init(&cur->node);
@@ -115,14 +113,11 @@ int dma_fence_signal(struct dma_fence *fence)
 	if (!fence)
 		return -EINVAL;
 
-	if (!ktime_to_ns(fence->timestamp)) {
-		fence->timestamp = ktime_get();
-		smp_mb__before_atomic();
-	}
-
 	if (test_and_set_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
 		return -EINVAL;
 
+	fence->timestamp = ktime_get();
+	set_bit(DMA_FENCE_FLAG_TIMESTAMP_BIT, &fence->flags);
 	trace_dma_fence_signaled(fence);
 
 	if (test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &fence->flags)) {
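These two hunks change who writes fence->timestamp. Previously any signaler wrote it before raising DMA_FENCE_FLAG_SIGNALED_BIT (hence the smp_mb__before_atomic() pairing); now the thread that wins the test_and_set_bit() race is the sole writer, and DMA_FENCE_FLAG_TIMESTAMP_BIT publishes the store. A compressed restatement of the signaler side (a sketch, not a drop-in replacement for the code above):

	if (test_and_set_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
		return -EINVAL;	/* lost the race: leave the timestamp alone */

	fence->timestamp = ktime_get();	/* sole writer from here on */
	set_bit(DMA_FENCE_FLAG_TIMESTAMP_BIT, &fence->flags);	/* publish */

Readers must now check DMA_FENCE_FLAG_TIMESTAMP_BIT before trusting fence->timestamp, which is exactly what the sync_debug.c and sync_file.c hunks below start doing.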
diff --git a/drivers/dma-buf/sync_debug.c b/drivers/dma-buf/sync_debug.c
index 82a6e7f6d37f..59a3b2f8ee91 100644
--- a/drivers/dma-buf/sync_debug.c
+++ b/drivers/dma-buf/sync_debug.c
@@ -84,7 +84,7 @@ static void sync_print_fence(struct seq_file *s,
 		   show ? "_" : "",
 		   sync_status_str(status));
 
-	if (status) {
+	if (test_bit(DMA_FENCE_FLAG_TIMESTAMP_BIT, &fence->flags)) {
 		struct timespec64 ts64 =
 			ktime_to_timespec64(fence->timestamp);
 
diff --git a/drivers/dma-buf/sync_file.c b/drivers/dma-buf/sync_file.c
index 545e2c5c4815..d7e219d2669d 100644
--- a/drivers/dma-buf/sync_file.c
+++ b/drivers/dma-buf/sync_file.c
@@ -391,7 +391,13 @@ static void sync_fill_fence_info(struct dma_fence *fence,
 		sizeof(info->driver_name));
 
 	info->status = dma_fence_get_status(fence);
-	info->timestamp_ns = ktime_to_ns(fence->timestamp);
+	while (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags) &&
+	       !test_bit(DMA_FENCE_FLAG_TIMESTAMP_BIT, &fence->flags))
+		cpu_relax();
+	info->timestamp_ns =
+		test_bit(DMA_FENCE_FLAG_TIMESTAMP_BIT, &fence->flags) ?
+		ktime_to_ns(fence->timestamp) :
+		ktime_set(0, 0);
 }
 
 static long sync_file_ioctl_fence_info(struct sync_file *sync_file,
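The busy-wait added to sync_fill_fence_info() covers the window in which a fence has signaled but its timestamp has not been published yet. The same rule, factored into a hypothetical helper (not part of this patch; shown only to make the invariant explicit):

	/* Returns the signaling timestamp in ns, or 0 if the fence has not
	 * signaled. Spins only for the short window between SIGNALED_BIT
	 * and TIMESTAMP_BIT being set by the signaler. */
	static inline s64 fence_timestamp_ns(struct dma_fence *fence)
	{
		while (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags) &&
		       !test_bit(DMA_FENCE_FLAG_TIMESTAMP_BIT, &fence->flags))
			cpu_relax();

		return test_bit(DMA_FENCE_FLAG_TIMESTAMP_BIT, &fence->flags) ?
		       ktime_to_ns(fence->timestamp) : 0;
	}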
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
index 5f8ada1d872b..37971d9402e3 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
@@ -101,7 +101,6 @@ void amdgpu_amdkfd_device_init(struct amdgpu_device *adev)
 	if (adev->kfd) {
 		struct kgd2kfd_shared_resources gpu_resources = {
 			.compute_vmid_bitmap = 0xFF00,
-			.num_mec = adev->gfx.mec.num_mec,
 			.num_pipe_per_mec = adev->gfx.mec.num_pipe_per_mec,
 			.num_queue_per_pipe = adev->gfx.mec.num_queue_per_pipe
 		};
@@ -122,7 +121,7 @@ void amdgpu_amdkfd_device_init(struct amdgpu_device *adev)
 
 		/* According to linux/bitmap.h we shouldn't use bitmap_clear if
 		 * nbits is not compile time constant */
-		last_valid_bit = adev->gfx.mec.num_mec
+		last_valid_bit = 1 /* only first MEC can have compute queues */
 				* adev->gfx.mec.num_pipe_per_mec
 				* adev->gfx.mec.num_queue_per_pipe;
 		for (i = last_valid_bit; i < KGD_MAX_QUEUES; ++i)
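With num_mec gone from the shared-resources struct, the interface hard-codes the assumption that only the first MEC can host KFD compute queues. Worked example of the last_valid_bit arithmetic, using the static values the radeon hook passes later in this patch (4 pipes per MEC, 8 queues per pipe):

	/* 1 MEC * 4 pipes/MEC * 8 queues/pipe = 32, so queue bits
	 * 32..KGD_MAX_QUEUES-1 are cleared from the queue bitmap. */
	unsigned int last_valid_bit = 1 * 4 * 8;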
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
index 88187bfc5ea3..3f95f7cb4019 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
@@ -226,10 +226,6 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd,
 
 	kfd->shared_resources = *gpu_resources;
 
-	/* We only use the first MEC */
-	if (kfd->shared_resources.num_mec > 1)
-		kfd->shared_resources.num_mec = 1;
-
 	/* calculate max size of mqds needed for queues */
 	size = max_num_of_queues_per_device *
 			kfd->device_info->mqd_size_aligned;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
index 955aa304ff48..602769ced3bd 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
@@ -77,13 +77,6 @@ static bool is_pipe_enabled(struct device_queue_manager *dqm, int mec, int pipe)
 	return false;
 }
 
-unsigned int get_mec_num(struct device_queue_manager *dqm)
-{
-	BUG_ON(!dqm || !dqm->dev);
-
-	return dqm->dev->shared_resources.num_mec;
-}
-
 unsigned int get_queues_num(struct device_queue_manager *dqm)
 {
 	BUG_ON(!dqm || !dqm->dev);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
index 66b9615bc3c1..faf820a06400 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
@@ -180,7 +180,6 @@ void device_queue_manager_init_cik(struct device_queue_manager_asic_ops *ops);
 void device_queue_manager_init_vi(struct device_queue_manager_asic_ops *ops);
 void program_sh_mem_settings(struct device_queue_manager *dqm,
 					struct qcm_process_device *qpd);
-unsigned int get_mec_num(struct device_queue_manager *dqm);
 unsigned int get_queues_num(struct device_queue_manager *dqm);
 unsigned int get_queues_per_pipe(struct device_queue_manager *dqm);
 unsigned int get_pipes_per_mec(struct device_queue_manager *dqm);
diff --git a/drivers/gpu/drm/amd/include/kgd_kfd_interface.h b/drivers/gpu/drm/amd/include/kgd_kfd_interface.h
index 91ef1484b3bb..36f376677a53 100644
--- a/drivers/gpu/drm/amd/include/kgd_kfd_interface.h
+++ b/drivers/gpu/drm/amd/include/kgd_kfd_interface.h
@@ -63,9 +63,6 @@ struct kgd2kfd_shared_resources {
 	/* Bit n == 1 means VMID n is available for KFD. */
 	unsigned int compute_vmid_bitmap;
 
-	/* number of mec available from the hardware */
-	uint32_t num_mec;
-
 	/* number of pipes per mec */
 	uint32_t num_pipe_per_mec;
 
diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c
index bfd237c15e76..ae5f06895562 100644
--- a/drivers/gpu/drm/drm_dp_mst_topology.c
+++ b/drivers/gpu/drm/drm_dp_mst_topology.c
@@ -330,6 +330,13 @@ static bool drm_dp_sideband_msg_build(struct drm_dp_sideband_msg_rx *msg,
 			return false;
 		}
 
+		/*
+		 * ignore out-of-order messages or messages that are part of a
+		 * failed transaction
+		 */
+		if (!recv_hdr.somt && !msg->have_somt)
+			return false;
+
 		/* get length contained in this portion */
 		msg->curchunk_len = recv_hdr.msg_len;
 		msg->curchunk_hdrlen = hdrlen;
@@ -2164,7 +2171,7 @@ out_unlock:
 }
 EXPORT_SYMBOL(drm_dp_mst_topology_mgr_resume);
 
-static void drm_dp_get_one_sb_msg(struct drm_dp_mst_topology_mgr *mgr, bool up)
+static bool drm_dp_get_one_sb_msg(struct drm_dp_mst_topology_mgr *mgr, bool up)
 {
 	int len;
 	u8 replyblock[32];
@@ -2179,12 +2186,12 @@ static void drm_dp_get_one_sb_msg(struct drm_dp_mst_topology_mgr *mgr, bool up)
 			       replyblock, len);
 	if (ret != len) {
 		DRM_DEBUG_KMS("failed to read DPCD down rep %d %d\n", len, ret);
-		return;
+		return false;
 	}
 	ret = drm_dp_sideband_msg_build(msg, replyblock, len, true);
 	if (!ret) {
 		DRM_DEBUG_KMS("sideband msg build failed %d\n", replyblock[0]);
-		return;
+		return false;
 	}
 	replylen = msg->curchunk_len + msg->curchunk_hdrlen;
 
@@ -2196,21 +2203,32 @@ static void drm_dp_get_one_sb_msg(struct drm_dp_mst_topology_mgr *mgr, bool up)
 		ret = drm_dp_dpcd_read(mgr->aux, basereg + curreply,
 				       replyblock, len);
 		if (ret != len) {
-			DRM_DEBUG_KMS("failed to read a chunk\n");
+			DRM_DEBUG_KMS("failed to read a chunk (len %d, ret %d)\n",
+				      len, ret);
+			return false;
 		}
+
 		ret = drm_dp_sideband_msg_build(msg, replyblock, len, false);
-		if (ret == false)
+		if (!ret) {
 			DRM_DEBUG_KMS("failed to build sideband msg\n");
+			return false;
+		}
+
 		curreply += len;
 		replylen -= len;
 	}
+	return true;
 }
 
 static int drm_dp_mst_handle_down_rep(struct drm_dp_mst_topology_mgr *mgr)
 {
 	int ret = 0;
 
-	drm_dp_get_one_sb_msg(mgr, false);
+	if (!drm_dp_get_one_sb_msg(mgr, false)) {
+		memset(&mgr->down_rep_recv, 0,
+		       sizeof(struct drm_dp_sideband_msg_rx));
+		return 0;
+	}
 
 	if (mgr->down_rep_recv.have_eomt) {
 		struct drm_dp_sideband_msg_tx *txmsg;
@@ -2266,7 +2284,12 @@ static int drm_dp_mst_handle_down_rep(struct drm_dp_mst_topology_mgr *mgr)
 static int drm_dp_mst_handle_up_req(struct drm_dp_mst_topology_mgr *mgr)
 {
 	int ret = 0;
-	drm_dp_get_one_sb_msg(mgr, true);
+
+	if (!drm_dp_get_one_sb_msg(mgr, true)) {
+		memset(&mgr->up_req_recv, 0,
+		       sizeof(struct drm_dp_sideband_msg_rx));
+		return 0;
+	}
 
 	if (mgr->up_req_recv.have_eomt) {
 		struct drm_dp_sideband_msg_req_body msg;
@@ -2318,7 +2341,9 @@ static int drm_dp_mst_handle_up_req(struct drm_dp_mst_topology_mgr *mgr)
 			DRM_DEBUG_KMS("Got RSN: pn: %d avail_pbn %d\n", msg.u.resource_stat.port_number, msg.u.resource_stat.available_pbn);
 		}
 
-		drm_dp_put_mst_branch_device(mstb);
+		if (mstb)
+			drm_dp_put_mst_branch_device(mstb);
+
 		memset(&mgr->up_req_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
 	}
 	return ret;
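Two related fixes here: drm_dp_sideband_msg_build() now drops chunks that neither start a transaction (somt clear) nor continue one already in flight (have_somt clear), and drm_dp_get_one_sb_msg() reports failure so both callers can reset their receive state instead of parsing a half-built message. The chunk-acceptance rule, restated as a standalone predicate (illustrative only; the real check is in the first hunk above):

	static bool sideband_chunk_acceptable(bool chunk_somt, bool have_somt)
	{
		/* A chunk must either open a transaction or continue one
		 * that is already open; anything else is out of order or a
		 * leftover from a failed transaction. */
		return chunk_somt || have_somt;
	}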
diff --git a/drivers/gpu/drm/imx/ipuv3-plane.c b/drivers/gpu/drm/imx/ipuv3-plane.c
index 49546222c6d3..6276bb834b4f 100644
--- a/drivers/gpu/drm/imx/ipuv3-plane.c
+++ b/drivers/gpu/drm/imx/ipuv3-plane.c
@@ -54,7 +54,7 @@ static const uint32_t ipu_plane_formats[] = {
 	DRM_FORMAT_RGBA8888,
 	DRM_FORMAT_RGBX8888,
 	DRM_FORMAT_BGRA8888,
-	DRM_FORMAT_BGRA8888,
+	DRM_FORMAT_BGRX8888,
 	DRM_FORMAT_UYVY,
 	DRM_FORMAT_VYUY,
 	DRM_FORMAT_YUYV,
diff --git a/drivers/gpu/drm/imx/parallel-display.c b/drivers/gpu/drm/imx/parallel-display.c
index 636031a30e17..8aca20209cb8 100644
--- a/drivers/gpu/drm/imx/parallel-display.c
+++ b/drivers/gpu/drm/imx/parallel-display.c
@@ -237,7 +237,7 @@ static int imx_pd_bind(struct device *dev, struct device *master, void *data)
 
 	/* port@1 is the output port */
 	ret = drm_of_find_panel_or_bridge(np, 1, 0, &imxpd->panel, &imxpd->bridge);
-	if (ret)
+	if (ret && ret != -ENODEV)
 		return ret;
 
 	imxpd->dev = dev;
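drm_of_find_panel_or_bridge() returns -ENODEV when the port simply has no panel or bridge described in the device tree, which is a legal configuration for the parallel display. Only other errors should abort the bind, so the pattern (a sketch of the hunk above) is:

	ret = drm_of_find_panel_or_bridge(np, 1, 0, &imxpd->panel, &imxpd->bridge);
	if (ret && ret != -ENODEV)
		return ret;	/* real failure, e.g. -EPROBE_DEFER */
	/* on -ENODEV: no endpoint connected, continue without panel/bridge */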
diff --git a/drivers/gpu/drm/radeon/radeon_kfd.c b/drivers/gpu/drm/radeon/radeon_kfd.c
index 699fe7f9b8bf..a2ab6dcdf4a2 100644
--- a/drivers/gpu/drm/radeon/radeon_kfd.c
+++ b/drivers/gpu/drm/radeon/radeon_kfd.c
@@ -184,7 +184,6 @@ void radeon_kfd_device_init(struct radeon_device *rdev)
 	if (rdev->kfd) {
 		struct kgd2kfd_shared_resources gpu_resources = {
 			.compute_vmid_bitmap = 0xFF00,
-			.num_mec = 1,
 			.num_pipe_per_mec = 4,
 			.num_queue_per_pipe = 8
 		};
diff --git a/drivers/gpu/drm/vc4/vc4_crtc.c b/drivers/gpu/drm/vc4/vc4_crtc.c
index 403bbd5f99a9..a12cc7ea99b6 100644
--- a/drivers/gpu/drm/vc4/vc4_crtc.c
+++ b/drivers/gpu/drm/vc4/vc4_crtc.c
@@ -520,6 +520,34 @@ static void vc4_crtc_disable(struct drm_crtc *crtc)
 				 SCALER_DISPSTATX_EMPTY);
 }
 
+static void vc4_crtc_update_dlist(struct drm_crtc *crtc)
+{
+	struct drm_device *dev = crtc->dev;
+	struct vc4_dev *vc4 = to_vc4_dev(dev);
+	struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc);
+	struct vc4_crtc_state *vc4_state = to_vc4_crtc_state(crtc->state);
+
+	if (crtc->state->event) {
+		unsigned long flags;
+
+		crtc->state->event->pipe = drm_crtc_index(crtc);
+
+		WARN_ON(drm_crtc_vblank_get(crtc) != 0);
+
+		spin_lock_irqsave(&dev->event_lock, flags);
+		vc4_crtc->event = crtc->state->event;
+		crtc->state->event = NULL;
+
+		HVS_WRITE(SCALER_DISPLISTX(vc4_crtc->channel),
+			  vc4_state->mm.start);
+
+		spin_unlock_irqrestore(&dev->event_lock, flags);
+	} else {
+		HVS_WRITE(SCALER_DISPLISTX(vc4_crtc->channel),
+			  vc4_state->mm.start);
+	}
+}
+
 static void vc4_crtc_enable(struct drm_crtc *crtc)
 {
 	struct drm_device *dev = crtc->dev;
@@ -530,6 +558,12 @@ static void vc4_crtc_enable(struct drm_crtc *crtc)
 
 	require_hvs_enabled(dev);
 
+	/* Enable vblank irq handling before crtc is started otherwise
+	 * drm_crtc_vblank_get() fails in vc4_crtc_update_dlist().
+	 */
+	drm_crtc_vblank_on(crtc);
+	vc4_crtc_update_dlist(crtc);
+
 	/* Turn on the scaler, which will wait for vstart to start
 	 * compositing.
 	 */
@@ -541,9 +575,6 @@ static void vc4_crtc_enable(struct drm_crtc *crtc)
 	/* Turn on the pixel valve, which will emit the vstart signal. */
 	CRTC_WRITE(PV_V_CONTROL,
 		   CRTC_READ(PV_V_CONTROL) | PV_VCONTROL_VIDEN);
-
-	/* Enable vblank irq handling after crtc is started. */
-	drm_crtc_vblank_on(crtc);
 }
 
 static bool vc4_crtc_mode_fixup(struct drm_crtc *crtc,
@@ -598,7 +629,6 @@ static void vc4_crtc_atomic_flush(struct drm_crtc *crtc,
 {
 	struct drm_device *dev = crtc->dev;
 	struct vc4_dev *vc4 = to_vc4_dev(dev);
-	struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc);
 	struct vc4_crtc_state *vc4_state = to_vc4_crtc_state(crtc->state);
 	struct drm_plane *plane;
 	bool debug_dump_regs = false;
@@ -620,25 +650,15 @@ static void vc4_crtc_atomic_flush(struct drm_crtc *crtc,
 
 	WARN_ON_ONCE(dlist_next - dlist_start != vc4_state->mm.size);
 
-	if (crtc->state->event) {
-		unsigned long flags;
-
-		crtc->state->event->pipe = drm_crtc_index(crtc);
-
-		WARN_ON(drm_crtc_vblank_get(crtc) != 0);
-
-		spin_lock_irqsave(&dev->event_lock, flags);
-		vc4_crtc->event = crtc->state->event;
-		crtc->state->event = NULL;
-
-		HVS_WRITE(SCALER_DISPLISTX(vc4_crtc->channel),
-			  vc4_state->mm.start);
-
-		spin_unlock_irqrestore(&dev->event_lock, flags);
-	} else {
-		HVS_WRITE(SCALER_DISPLISTX(vc4_crtc->channel),
-			  vc4_state->mm.start);
-	}
+	/* Only update DISPLIST if the CRTC was already running and is not
+	 * being disabled.
+	 * vc4_crtc_enable() takes care of updating the dlist just after
+	 * re-enabling VBLANK interrupts and before enabling the engine.
+	 * If the CRTC is being disabled, there's no point in updating this
+	 * information.
+	 */
+	if (crtc->state->active && old_state->active)
+		vc4_crtc_update_dlist(crtc);
 
 	if (debug_dump_regs) {
 		DRM_INFO("CRTC %d HVS after:\n", drm_crtc_index(crtc));
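The vc4 change factors the display-list commit into vc4_crtc_update_dlist() and imposes a strict ordering in the enable path. A hedged restatement of that ordering, using only functions from the diff above (a sketch, not the driver's actual code):

	static void vc4_crtc_enable_order_sketch(struct drm_crtc *crtc)
	{
		/* 1. Vblank interrupts first, so the drm_crtc_vblank_get()
		 *    inside vc4_crtc_update_dlist() can take a reference. */
		drm_crtc_vblank_on(crtc);

		/* 2. Commit the display list and queue any pending event. */
		vc4_crtc_update_dlist(crtc);

		/* 3. Only then enable the scaler and the pixel valve. */
	}

vc4_crtc_atomic_flush() now re-commits the dlist only when the CRTC was active both before and after the commit; the enable and disable paths handle their own cases.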
diff --git a/include/linux/dma-fence.h b/include/linux/dma-fence.h
index a5195a7d6f77..0a186c4f3981 100644
--- a/include/linux/dma-fence.h
+++ b/include/linux/dma-fence.h
@@ -55,6 +55,7 @@ struct dma_fence_cb;
  *  of the time.
  *
  * DMA_FENCE_FLAG_SIGNALED_BIT - fence is already signaled
+ * DMA_FENCE_FLAG_TIMESTAMP_BIT - timestamp recorded for fence signaling
  * DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT - enable_signaling might have been called
  * DMA_FENCE_FLAG_USER_BITS - start of the unused bits, can be used by the
  *  implementer of the fence for its own purposes. Can be used in different
@@ -84,6 +85,7 @@ struct dma_fence {
 
 enum dma_fence_flag_bits {
 	DMA_FENCE_FLAG_SIGNALED_BIT,
+	DMA_FENCE_FLAG_TIMESTAMP_BIT,
 	DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT,
 	DMA_FENCE_FLAG_USER_BITS, /* must always be last member */
 };