diff options
author | David S. Miller <davem@davemloft.net> | 2017-10-22 08:36:53 -0400 |
---|---|---|
committer | David S. Miller <davem@davemloft.net> | 2017-10-22 08:39:14 -0400 |
commit | f8ddadc4db6c7b7029b6d0e0d9af24f74ad27ca2 (patch) | |
tree | 0a6432aba336bae42313613f4c891bcfce02bd4e /drivers/gpu | |
parent | bdd091bab8c631bd2801af838e344fad34566410 (diff) | |
parent | b5ac3beb5a9f0ef0ea64cd85faf94c0dc4de0e42 (diff) |
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
There were quite a few overlapping sets of changes here.
Daniel's bug fix for off-by-ones in the new BPF branch instructions,
along with the added allowances for "data_end > ptr + x" forms
collided with the metadata additions.
Along with those three changes came verifier test cases, which in
their final form I tried to group together properly. If I had just
trimmed GIT's conflict tags as-is, this would have split up the
meta tests unnecessarily.
In the socketmap code, a set of preemption disabling changes
overlapped with the rename of bpf_compute_data_end() to
bpf_compute_data_pointers().
Changes were made to the mv88e6060.c driver set addr method
which got removed in net-next.
The hyperv transport socket layer had a locking change in 'net'
which overlapped with a change of socket state macro usage
in 'net-next'.
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'drivers/gpu')
31 files changed, 197 insertions, 118 deletions
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c index 7ef6c28a34d9..bc746131987f 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | |||
@@ -834,7 +834,7 @@ int amdgpu_ttm_bind(struct ttm_buffer_object *bo, struct ttm_mem_reg *bo_mem) | |||
834 | placement.busy_placement = &placements; | 834 | placement.busy_placement = &placements; |
835 | placements.fpfn = 0; | 835 | placements.fpfn = 0; |
836 | placements.lpfn = adev->mc.gart_size >> PAGE_SHIFT; | 836 | placements.lpfn = adev->mc.gart_size >> PAGE_SHIFT; |
837 | placements.flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT; | 837 | placements.flags = bo->mem.placement | TTM_PL_FLAG_TT; |
838 | 838 | ||
839 | r = ttm_bo_mem_space(bo, &placement, &tmp, true, false); | 839 | r = ttm_bo_mem_space(bo, &placement, &tmp, true, false); |
840 | if (unlikely(r)) | 840 | if (unlikely(r)) |
diff --git a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c index 97c94f9683fa..38cea6fb25a8 100644 --- a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c +++ b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c | |||
@@ -205,32 +205,17 @@ void amd_sched_entity_fini(struct amd_gpu_scheduler *sched, | |||
205 | struct amd_sched_entity *entity) | 205 | struct amd_sched_entity *entity) |
206 | { | 206 | { |
207 | struct amd_sched_rq *rq = entity->rq; | 207 | struct amd_sched_rq *rq = entity->rq; |
208 | int r; | ||
209 | 208 | ||
210 | if (!amd_sched_entity_is_initialized(sched, entity)) | 209 | if (!amd_sched_entity_is_initialized(sched, entity)) |
211 | return; | 210 | return; |
211 | |||
212 | /** | 212 | /** |
213 | * The client will not queue more IBs during this fini, consume existing | 213 | * The client will not queue more IBs during this fini, consume existing |
214 | * queued IBs or discard them on SIGKILL | 214 | * queued IBs |
215 | */ | 215 | */ |
216 | if ((current->flags & PF_SIGNALED) && current->exit_code == SIGKILL) | 216 | wait_event(sched->job_scheduled, amd_sched_entity_is_idle(entity)); |
217 | r = -ERESTARTSYS; | ||
218 | else | ||
219 | r = wait_event_killable(sched->job_scheduled, | ||
220 | amd_sched_entity_is_idle(entity)); | ||
221 | amd_sched_rq_remove_entity(rq, entity); | ||
222 | if (r) { | ||
223 | struct amd_sched_job *job; | ||
224 | 217 | ||
225 | /* Park the kernel for a moment to make sure it isn't processing | 218 | amd_sched_rq_remove_entity(rq, entity); |
226 | * our enity. | ||
227 | */ | ||
228 | kthread_park(sched->thread); | ||
229 | kthread_unpark(sched->thread); | ||
230 | while (kfifo_out(&entity->job_queue, &job, sizeof(job))) | ||
231 | sched->ops->free_job(job); | ||
232 | |||
233 | } | ||
234 | kfifo_free(&entity->job_queue); | 219 | kfifo_free(&entity->job_queue); |
235 | } | 220 | } |
236 | 221 | ||
diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c index 4e53aae9a1fb..0028591f3f95 100644 --- a/drivers/gpu/drm/drm_atomic_helper.c +++ b/drivers/gpu/drm/drm_atomic_helper.c | |||
@@ -2960,6 +2960,7 @@ out: | |||
2960 | drm_modeset_backoff(&ctx); | 2960 | drm_modeset_backoff(&ctx); |
2961 | } | 2961 | } |
2962 | 2962 | ||
2963 | drm_atomic_state_put(state); | ||
2963 | drm_modeset_drop_locks(&ctx); | 2964 | drm_modeset_drop_locks(&ctx); |
2964 | drm_modeset_acquire_fini(&ctx); | 2965 | drm_modeset_acquire_fini(&ctx); |
2965 | 2966 | ||
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.c b/drivers/gpu/drm/exynos/exynos_drm_drv.c index e651a58c18cf..82b72425a42f 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_drv.c +++ b/drivers/gpu/drm/exynos/exynos_drm_drv.c | |||
@@ -168,11 +168,13 @@ static struct drm_driver exynos_drm_driver = { | |||
168 | static int exynos_drm_suspend(struct device *dev) | 168 | static int exynos_drm_suspend(struct device *dev) |
169 | { | 169 | { |
170 | struct drm_device *drm_dev = dev_get_drvdata(dev); | 170 | struct drm_device *drm_dev = dev_get_drvdata(dev); |
171 | struct exynos_drm_private *private = drm_dev->dev_private; | 171 | struct exynos_drm_private *private; |
172 | 172 | ||
173 | if (pm_runtime_suspended(dev) || !drm_dev) | 173 | if (pm_runtime_suspended(dev) || !drm_dev) |
174 | return 0; | 174 | return 0; |
175 | 175 | ||
176 | private = drm_dev->dev_private; | ||
177 | |||
176 | drm_kms_helper_poll_disable(drm_dev); | 178 | drm_kms_helper_poll_disable(drm_dev); |
177 | exynos_drm_fbdev_suspend(drm_dev); | 179 | exynos_drm_fbdev_suspend(drm_dev); |
178 | private->suspend_state = drm_atomic_helper_suspend(drm_dev); | 180 | private->suspend_state = drm_atomic_helper_suspend(drm_dev); |
@@ -188,11 +190,12 @@ static int exynos_drm_suspend(struct device *dev) | |||
188 | static int exynos_drm_resume(struct device *dev) | 190 | static int exynos_drm_resume(struct device *dev) |
189 | { | 191 | { |
190 | struct drm_device *drm_dev = dev_get_drvdata(dev); | 192 | struct drm_device *drm_dev = dev_get_drvdata(dev); |
191 | struct exynos_drm_private *private = drm_dev->dev_private; | 193 | struct exynos_drm_private *private; |
192 | 194 | ||
193 | if (pm_runtime_suspended(dev) || !drm_dev) | 195 | if (pm_runtime_suspended(dev) || !drm_dev) |
194 | return 0; | 196 | return 0; |
195 | 197 | ||
198 | private = drm_dev->dev_private; | ||
196 | drm_atomic_helper_resume(drm_dev, private->suspend_state); | 199 | drm_atomic_helper_resume(drm_dev, private->suspend_state); |
197 | exynos_drm_fbdev_resume(drm_dev); | 200 | exynos_drm_fbdev_resume(drm_dev); |
198 | drm_kms_helper_poll_enable(drm_dev); | 201 | drm_kms_helper_poll_enable(drm_dev); |
@@ -427,6 +430,7 @@ static void exynos_drm_unbind(struct device *dev) | |||
427 | 430 | ||
428 | kfree(drm->dev_private); | 431 | kfree(drm->dev_private); |
429 | drm->dev_private = NULL; | 432 | drm->dev_private = NULL; |
433 | dev_set_drvdata(dev, NULL); | ||
430 | 434 | ||
431 | drm_dev_unref(drm); | 435 | drm_dev_unref(drm); |
432 | } | 436 | } |
diff --git a/drivers/gpu/drm/i915/gvt/sched_policy.c b/drivers/gpu/drm/i915/gvt/sched_policy.c index 436377da41ba..03532dfc0cd5 100644 --- a/drivers/gpu/drm/i915/gvt/sched_policy.c +++ b/drivers/gpu/drm/i915/gvt/sched_policy.c | |||
@@ -308,20 +308,8 @@ static int tbs_sched_init_vgpu(struct intel_vgpu *vgpu) | |||
308 | 308 | ||
309 | static void tbs_sched_clean_vgpu(struct intel_vgpu *vgpu) | 309 | static void tbs_sched_clean_vgpu(struct intel_vgpu *vgpu) |
310 | { | 310 | { |
311 | struct intel_gvt_workload_scheduler *scheduler = &vgpu->gvt->scheduler; | ||
312 | int ring_id; | ||
313 | |||
314 | kfree(vgpu->sched_data); | 311 | kfree(vgpu->sched_data); |
315 | vgpu->sched_data = NULL; | 312 | vgpu->sched_data = NULL; |
316 | |||
317 | spin_lock_bh(&scheduler->mmio_context_lock); | ||
318 | for (ring_id = 0; ring_id < I915_NUM_ENGINES; ring_id++) { | ||
319 | if (scheduler->engine_owner[ring_id] == vgpu) { | ||
320 | intel_gvt_switch_mmio(vgpu, NULL, ring_id); | ||
321 | scheduler->engine_owner[ring_id] = NULL; | ||
322 | } | ||
323 | } | ||
324 | spin_unlock_bh(&scheduler->mmio_context_lock); | ||
325 | } | 313 | } |
326 | 314 | ||
327 | static void tbs_sched_start_schedule(struct intel_vgpu *vgpu) | 315 | static void tbs_sched_start_schedule(struct intel_vgpu *vgpu) |
@@ -388,6 +376,7 @@ void intel_vgpu_stop_schedule(struct intel_vgpu *vgpu) | |||
388 | { | 376 | { |
389 | struct intel_gvt_workload_scheduler *scheduler = | 377 | struct intel_gvt_workload_scheduler *scheduler = |
390 | &vgpu->gvt->scheduler; | 378 | &vgpu->gvt->scheduler; |
379 | int ring_id; | ||
391 | 380 | ||
392 | gvt_dbg_core("vgpu%d: stop schedule\n", vgpu->id); | 381 | gvt_dbg_core("vgpu%d: stop schedule\n", vgpu->id); |
393 | 382 | ||
@@ -401,4 +390,13 @@ void intel_vgpu_stop_schedule(struct intel_vgpu *vgpu) | |||
401 | scheduler->need_reschedule = true; | 390 | scheduler->need_reschedule = true; |
402 | scheduler->current_vgpu = NULL; | 391 | scheduler->current_vgpu = NULL; |
403 | } | 392 | } |
393 | |||
394 | spin_lock_bh(&scheduler->mmio_context_lock); | ||
395 | for (ring_id = 0; ring_id < I915_NUM_ENGINES; ring_id++) { | ||
396 | if (scheduler->engine_owner[ring_id] == vgpu) { | ||
397 | intel_gvt_switch_mmio(vgpu, NULL, ring_id); | ||
398 | scheduler->engine_owner[ring_id] = NULL; | ||
399 | } | ||
400 | } | ||
401 | spin_unlock_bh(&scheduler->mmio_context_lock); | ||
404 | } | 402 | } |
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 19404c96eeb1..32e857dc507c 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c | |||
@@ -2657,6 +2657,9 @@ i915_gem_object_pwrite_gtt(struct drm_i915_gem_object *obj, | |||
2657 | if (READ_ONCE(obj->mm.pages)) | 2657 | if (READ_ONCE(obj->mm.pages)) |
2658 | return -ENODEV; | 2658 | return -ENODEV; |
2659 | 2659 | ||
2660 | if (obj->mm.madv != I915_MADV_WILLNEED) | ||
2661 | return -EFAULT; | ||
2662 | |||
2660 | /* Before the pages are instantiated the object is treated as being | 2663 | /* Before the pages are instantiated the object is treated as being |
2661 | * in the CPU domain. The pages will be clflushed as required before | 2664 | * in the CPU domain. The pages will be clflushed as required before |
2662 | * use, and we can freely write into the pages directly. If userspace | 2665 | * use, and we can freely write into the pages directly. If userspace |
@@ -3013,10 +3016,15 @@ void i915_gem_reset_finish(struct drm_i915_private *dev_priv) | |||
3013 | 3016 | ||
3014 | static void nop_submit_request(struct drm_i915_gem_request *request) | 3017 | static void nop_submit_request(struct drm_i915_gem_request *request) |
3015 | { | 3018 | { |
3019 | unsigned long flags; | ||
3020 | |||
3016 | GEM_BUG_ON(!i915_terminally_wedged(&request->i915->gpu_error)); | 3021 | GEM_BUG_ON(!i915_terminally_wedged(&request->i915->gpu_error)); |
3017 | dma_fence_set_error(&request->fence, -EIO); | 3022 | dma_fence_set_error(&request->fence, -EIO); |
3018 | i915_gem_request_submit(request); | 3023 | |
3024 | spin_lock_irqsave(&request->engine->timeline->lock, flags); | ||
3025 | __i915_gem_request_submit(request); | ||
3019 | intel_engine_init_global_seqno(request->engine, request->global_seqno); | 3026 | intel_engine_init_global_seqno(request->engine, request->global_seqno); |
3027 | spin_unlock_irqrestore(&request->engine->timeline->lock, flags); | ||
3020 | } | 3028 | } |
3021 | 3029 | ||
3022 | static void engine_set_wedged(struct intel_engine_cs *engine) | 3030 | static void engine_set_wedged(struct intel_engine_cs *engine) |
diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c index 4df039ef2ce3..e161d383b526 100644 --- a/drivers/gpu/drm/i915/i915_gem_evict.c +++ b/drivers/gpu/drm/i915/i915_gem_evict.c | |||
@@ -33,21 +33,20 @@ | |||
33 | #include "intel_drv.h" | 33 | #include "intel_drv.h" |
34 | #include "i915_trace.h" | 34 | #include "i915_trace.h" |
35 | 35 | ||
36 | static bool ggtt_is_idle(struct drm_i915_private *dev_priv) | 36 | static bool ggtt_is_idle(struct drm_i915_private *i915) |
37 | { | 37 | { |
38 | struct i915_ggtt *ggtt = &dev_priv->ggtt; | 38 | struct intel_engine_cs *engine; |
39 | struct intel_engine_cs *engine; | 39 | enum intel_engine_id id; |
40 | enum intel_engine_id id; | ||
41 | 40 | ||
42 | for_each_engine(engine, dev_priv, id) { | 41 | if (i915->gt.active_requests) |
43 | struct intel_timeline *tl; | 42 | return false; |
44 | 43 | ||
45 | tl = &ggtt->base.timeline.engine[engine->id]; | 44 | for_each_engine(engine, i915, id) { |
46 | if (i915_gem_active_isset(&tl->last_request)) | 45 | if (engine->last_retired_context != i915->kernel_context) |
47 | return false; | 46 | return false; |
48 | } | 47 | } |
49 | 48 | ||
50 | return true; | 49 | return true; |
51 | } | 50 | } |
52 | 51 | ||
53 | static int ggtt_flush(struct drm_i915_private *i915) | 52 | static int ggtt_flush(struct drm_i915_private *i915) |
@@ -157,7 +156,8 @@ i915_gem_evict_something(struct i915_address_space *vm, | |||
157 | min_size, alignment, cache_level, | 156 | min_size, alignment, cache_level, |
158 | start, end, mode); | 157 | start, end, mode); |
159 | 158 | ||
160 | /* Retire before we search the active list. Although we have | 159 | /* |
160 | * Retire before we search the active list. Although we have | ||
161 | * reasonable accuracy in our retirement lists, we may have | 161 | * reasonable accuracy in our retirement lists, we may have |
162 | * a stray pin (preventing eviction) that can only be resolved by | 162 | * a stray pin (preventing eviction) that can only be resolved by |
163 | * retiring. | 163 | * retiring. |
@@ -182,7 +182,8 @@ search_again: | |||
182 | BUG_ON(ret); | 182 | BUG_ON(ret); |
183 | } | 183 | } |
184 | 184 | ||
185 | /* Can we unpin some objects such as idle hw contents, | 185 | /* |
186 | * Can we unpin some objects such as idle hw contents, | ||
186 | * or pending flips? But since only the GGTT has global entries | 187 | * or pending flips? But since only the GGTT has global entries |
187 | * such as scanouts, rinbuffers and contexts, we can skip the | 188 | * such as scanouts, rinbuffers and contexts, we can skip the |
188 | * purge when inspecting per-process local address spaces. | 189 | * purge when inspecting per-process local address spaces. |
@@ -190,19 +191,33 @@ search_again: | |||
190 | if (!i915_is_ggtt(vm) || flags & PIN_NONBLOCK) | 191 | if (!i915_is_ggtt(vm) || flags & PIN_NONBLOCK) |
191 | return -ENOSPC; | 192 | return -ENOSPC; |
192 | 193 | ||
193 | if (ggtt_is_idle(dev_priv)) { | 194 | /* |
194 | /* If we still have pending pageflip completions, drop | 195 | * Not everything in the GGTT is tracked via VMA using |
195 | * back to userspace to give our workqueues time to | 196 | * i915_vma_move_to_active(), otherwise we could evict as required |
196 | * acquire our locks and unpin the old scanouts. | 197 | * with minimal stalling. Instead we are forced to idle the GPU and |
197 | */ | 198 | * explicitly retire outstanding requests which will then remove |
198 | return intel_has_pending_fb_unpin(dev_priv) ? -EAGAIN : -ENOSPC; | 199 | * the pinning for active objects such as contexts and ring, |
199 | } | 200 | * enabling us to evict them on the next iteration. |
201 | * | ||
202 | * To ensure that all user contexts are evictable, we perform | ||
203 | * a switch to the perma-pinned kernel context. This all also gives | ||
204 | * us a termination condition, when the last retired context is | ||
205 | * the kernel's there is no more we can evict. | ||
206 | */ | ||
207 | if (!ggtt_is_idle(dev_priv)) { | ||
208 | ret = ggtt_flush(dev_priv); | ||
209 | if (ret) | ||
210 | return ret; | ||
200 | 211 | ||
201 | ret = ggtt_flush(dev_priv); | 212 | goto search_again; |
202 | if (ret) | 213 | } |
203 | return ret; | ||
204 | 214 | ||
205 | goto search_again; | 215 | /* |
216 | * If we still have pending pageflip completions, drop | ||
217 | * back to userspace to give our workqueues time to | ||
218 | * acquire our locks and unpin the old scanouts. | ||
219 | */ | ||
220 | return intel_has_pending_fb_unpin(dev_priv) ? -EAGAIN : -ENOSPC; | ||
206 | 221 | ||
207 | found: | 222 | found: |
208 | /* drm_mm doesn't allow any other other operations while | 223 | /* drm_mm doesn't allow any other other operations while |
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index ed7cd9ee2c2a..c9bcc6c45012 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h | |||
@@ -6998,6 +6998,7 @@ enum { | |||
6998 | */ | 6998 | */ |
6999 | #define L3_GENERAL_PRIO_CREDITS(x) (((x) >> 1) << 19) | 6999 | #define L3_GENERAL_PRIO_CREDITS(x) (((x) >> 1) << 19) |
7000 | #define L3_HIGH_PRIO_CREDITS(x) (((x) >> 1) << 14) | 7000 | #define L3_HIGH_PRIO_CREDITS(x) (((x) >> 1) << 14) |
7001 | #define L3_PRIO_CREDITS_MASK ((0x1f << 19) | (0x1f << 14)) | ||
7001 | 7002 | ||
7002 | #define GEN7_L3CNTLREG1 _MMIO(0xB01C) | 7003 | #define GEN7_L3CNTLREG1 _MMIO(0xB01C) |
7003 | #define GEN7_WA_FOR_GEN7_L3_CONTROL 0x3C47FF8C | 7004 | #define GEN7_WA_FOR_GEN7_L3_CONTROL 0x3C47FF8C |
diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c index 00c6aee0a9a1..5d4cd3d00564 100644 --- a/drivers/gpu/drm/i915/intel_bios.c +++ b/drivers/gpu/drm/i915/intel_bios.c | |||
@@ -1240,7 +1240,7 @@ static void parse_ddi_ports(struct drm_i915_private *dev_priv, | |||
1240 | { | 1240 | { |
1241 | enum port port; | 1241 | enum port port; |
1242 | 1242 | ||
1243 | if (!HAS_DDI(dev_priv)) | 1243 | if (!HAS_DDI(dev_priv) && !IS_CHERRYVIEW(dev_priv)) |
1244 | return; | 1244 | return; |
1245 | 1245 | ||
1246 | if (!dev_priv->vbt.child_dev_num) | 1246 | if (!dev_priv->vbt.child_dev_num) |
diff --git a/drivers/gpu/drm/i915/intel_color.c b/drivers/gpu/drm/i915/intel_color.c index ff9ecd211abb..b8315bca852b 100644 --- a/drivers/gpu/drm/i915/intel_color.c +++ b/drivers/gpu/drm/i915/intel_color.c | |||
@@ -74,7 +74,7 @@ | |||
74 | #define I9XX_CSC_COEFF_1_0 \ | 74 | #define I9XX_CSC_COEFF_1_0 \ |
75 | ((7 << 12) | I9XX_CSC_COEFF_FP(CTM_COEFF_1_0, 8)) | 75 | ((7 << 12) | I9XX_CSC_COEFF_FP(CTM_COEFF_1_0, 8)) |
76 | 76 | ||
77 | static bool crtc_state_is_legacy(struct drm_crtc_state *state) | 77 | static bool crtc_state_is_legacy_gamma(struct drm_crtc_state *state) |
78 | { | 78 | { |
79 | return !state->degamma_lut && | 79 | return !state->degamma_lut && |
80 | !state->ctm && | 80 | !state->ctm && |
@@ -288,7 +288,7 @@ static void cherryview_load_csc_matrix(struct drm_crtc_state *state) | |||
288 | } | 288 | } |
289 | 289 | ||
290 | mode = (state->ctm ? CGM_PIPE_MODE_CSC : 0); | 290 | mode = (state->ctm ? CGM_PIPE_MODE_CSC : 0); |
291 | if (!crtc_state_is_legacy(state)) { | 291 | if (!crtc_state_is_legacy_gamma(state)) { |
292 | mode |= (state->degamma_lut ? CGM_PIPE_MODE_DEGAMMA : 0) | | 292 | mode |= (state->degamma_lut ? CGM_PIPE_MODE_DEGAMMA : 0) | |
293 | (state->gamma_lut ? CGM_PIPE_MODE_GAMMA : 0); | 293 | (state->gamma_lut ? CGM_PIPE_MODE_GAMMA : 0); |
294 | } | 294 | } |
@@ -469,7 +469,7 @@ static void broadwell_load_luts(struct drm_crtc_state *state) | |||
469 | struct intel_crtc_state *intel_state = to_intel_crtc_state(state); | 469 | struct intel_crtc_state *intel_state = to_intel_crtc_state(state); |
470 | enum pipe pipe = to_intel_crtc(state->crtc)->pipe; | 470 | enum pipe pipe = to_intel_crtc(state->crtc)->pipe; |
471 | 471 | ||
472 | if (crtc_state_is_legacy(state)) { | 472 | if (crtc_state_is_legacy_gamma(state)) { |
473 | haswell_load_luts(state); | 473 | haswell_load_luts(state); |
474 | return; | 474 | return; |
475 | } | 475 | } |
@@ -529,7 +529,7 @@ static void glk_load_luts(struct drm_crtc_state *state) | |||
529 | 529 | ||
530 | glk_load_degamma_lut(state); | 530 | glk_load_degamma_lut(state); |
531 | 531 | ||
532 | if (crtc_state_is_legacy(state)) { | 532 | if (crtc_state_is_legacy_gamma(state)) { |
533 | haswell_load_luts(state); | 533 | haswell_load_luts(state); |
534 | return; | 534 | return; |
535 | } | 535 | } |
@@ -551,7 +551,7 @@ static void cherryview_load_luts(struct drm_crtc_state *state) | |||
551 | uint32_t i, lut_size; | 551 | uint32_t i, lut_size; |
552 | uint32_t word0, word1; | 552 | uint32_t word0, word1; |
553 | 553 | ||
554 | if (crtc_state_is_legacy(state)) { | 554 | if (crtc_state_is_legacy_gamma(state)) { |
555 | /* Turn off degamma/gamma on CGM block. */ | 555 | /* Turn off degamma/gamma on CGM block. */ |
556 | I915_WRITE(CGM_PIPE_MODE(pipe), | 556 | I915_WRITE(CGM_PIPE_MODE(pipe), |
557 | (state->ctm ? CGM_PIPE_MODE_CSC : 0)); | 557 | (state->ctm ? CGM_PIPE_MODE_CSC : 0)); |
@@ -632,12 +632,10 @@ int intel_color_check(struct drm_crtc *crtc, | |||
632 | return 0; | 632 | return 0; |
633 | 633 | ||
634 | /* | 634 | /* |
635 | * We also allow no degamma lut and a gamma lut at the legacy | 635 | * We also allow no degamma lut/ctm and a gamma lut at the legacy |
636 | * size (256 entries). | 636 | * size (256 entries). |
637 | */ | 637 | */ |
638 | if (!crtc_state->degamma_lut && | 638 | if (crtc_state_is_legacy_gamma(crtc_state)) |
639 | crtc_state->gamma_lut && | ||
640 | crtc_state->gamma_lut->length == LEGACY_LUT_LENGTH) | ||
641 | return 0; | 639 | return 0; |
642 | 640 | ||
643 | return -EINVAL; | 641 | return -EINVAL; |
diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c index 476681d5940c..5e5fe03b638c 100644 --- a/drivers/gpu/drm/i915/intel_ddi.c +++ b/drivers/gpu/drm/i915/intel_ddi.c | |||
@@ -664,8 +664,8 @@ intel_ddi_get_buf_trans_fdi(struct drm_i915_private *dev_priv, | |||
664 | int *n_entries) | 664 | int *n_entries) |
665 | { | 665 | { |
666 | if (IS_BROADWELL(dev_priv)) { | 666 | if (IS_BROADWELL(dev_priv)) { |
667 | *n_entries = ARRAY_SIZE(hsw_ddi_translations_fdi); | 667 | *n_entries = ARRAY_SIZE(bdw_ddi_translations_fdi); |
668 | return hsw_ddi_translations_fdi; | 668 | return bdw_ddi_translations_fdi; |
669 | } else if (IS_HASWELL(dev_priv)) { | 669 | } else if (IS_HASWELL(dev_priv)) { |
670 | *n_entries = ARRAY_SIZE(hsw_ddi_translations_fdi); | 670 | *n_entries = ARRAY_SIZE(hsw_ddi_translations_fdi); |
671 | return hsw_ddi_translations_fdi; | 671 | return hsw_ddi_translations_fdi; |
@@ -2102,8 +2102,7 @@ static void intel_ddi_clk_select(struct intel_encoder *encoder, | |||
2102 | * register writes. | 2102 | * register writes. |
2103 | */ | 2103 | */ |
2104 | val = I915_READ(DPCLKA_CFGCR0); | 2104 | val = I915_READ(DPCLKA_CFGCR0); |
2105 | val &= ~(DPCLKA_CFGCR0_DDI_CLK_OFF(port) | | 2105 | val &= ~DPCLKA_CFGCR0_DDI_CLK_OFF(port); |
2106 | DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(port)); | ||
2107 | I915_WRITE(DPCLKA_CFGCR0, val); | 2106 | I915_WRITE(DPCLKA_CFGCR0, val); |
2108 | } else if (IS_GEN9_BC(dev_priv)) { | 2107 | } else if (IS_GEN9_BC(dev_priv)) { |
2109 | /* DDI -> PLL mapping */ | 2108 | /* DDI -> PLL mapping */ |
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 64f7b51ed97c..5c7828c52d12 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c | |||
@@ -10245,13 +10245,10 @@ struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev, | |||
10245 | { | 10245 | { |
10246 | struct drm_i915_private *dev_priv = to_i915(dev); | 10246 | struct drm_i915_private *dev_priv = to_i915(dev); |
10247 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | 10247 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
10248 | enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder; | 10248 | enum transcoder cpu_transcoder; |
10249 | struct drm_display_mode *mode; | 10249 | struct drm_display_mode *mode; |
10250 | struct intel_crtc_state *pipe_config; | 10250 | struct intel_crtc_state *pipe_config; |
10251 | int htot = I915_READ(HTOTAL(cpu_transcoder)); | 10251 | u32 htot, hsync, vtot, vsync; |
10252 | int hsync = I915_READ(HSYNC(cpu_transcoder)); | ||
10253 | int vtot = I915_READ(VTOTAL(cpu_transcoder)); | ||
10254 | int vsync = I915_READ(VSYNC(cpu_transcoder)); | ||
10255 | enum pipe pipe = intel_crtc->pipe; | 10252 | enum pipe pipe = intel_crtc->pipe; |
10256 | 10253 | ||
10257 | mode = kzalloc(sizeof(*mode), GFP_KERNEL); | 10254 | mode = kzalloc(sizeof(*mode), GFP_KERNEL); |
@@ -10279,6 +10276,13 @@ struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev, | |||
10279 | i9xx_crtc_clock_get(intel_crtc, pipe_config); | 10276 | i9xx_crtc_clock_get(intel_crtc, pipe_config); |
10280 | 10277 | ||
10281 | mode->clock = pipe_config->port_clock / pipe_config->pixel_multiplier; | 10278 | mode->clock = pipe_config->port_clock / pipe_config->pixel_multiplier; |
10279 | |||
10280 | cpu_transcoder = pipe_config->cpu_transcoder; | ||
10281 | htot = I915_READ(HTOTAL(cpu_transcoder)); | ||
10282 | hsync = I915_READ(HSYNC(cpu_transcoder)); | ||
10283 | vtot = I915_READ(VTOTAL(cpu_transcoder)); | ||
10284 | vsync = I915_READ(VSYNC(cpu_transcoder)); | ||
10285 | |||
10282 | mode->hdisplay = (htot & 0xffff) + 1; | 10286 | mode->hdisplay = (htot & 0xffff) + 1; |
10283 | mode->htotal = ((htot & 0xffff0000) >> 16) + 1; | 10287 | mode->htotal = ((htot & 0xffff0000) >> 16) + 1; |
10284 | mode->hsync_start = (hsync & 0xffff) + 1; | 10288 | mode->hsync_start = (hsync & 0xffff) + 1; |
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c index 64134947c0aa..203198659ab2 100644 --- a/drivers/gpu/drm/i915/intel_dp.c +++ b/drivers/gpu/drm/i915/intel_dp.c | |||
@@ -2307,8 +2307,8 @@ static void edp_panel_off(struct intel_dp *intel_dp) | |||
2307 | I915_WRITE(pp_ctrl_reg, pp); | 2307 | I915_WRITE(pp_ctrl_reg, pp); |
2308 | POSTING_READ(pp_ctrl_reg); | 2308 | POSTING_READ(pp_ctrl_reg); |
2309 | 2309 | ||
2310 | intel_dp->panel_power_off_time = ktime_get_boottime(); | ||
2311 | wait_panel_off(intel_dp); | 2310 | wait_panel_off(intel_dp); |
2311 | intel_dp->panel_power_off_time = ktime_get_boottime(); | ||
2312 | 2312 | ||
2313 | /* We got a reference when we enabled the VDD. */ | 2313 | /* We got a reference when we enabled the VDD. */ |
2314 | intel_display_power_put(dev_priv, intel_dp->aux_power_domain); | 2314 | intel_display_power_put(dev_priv, intel_dp->aux_power_domain); |
@@ -5273,7 +5273,7 @@ intel_dp_init_panel_power_sequencer(struct drm_device *dev, | |||
5273 | * seems sufficient to avoid this problem. | 5273 | * seems sufficient to avoid this problem. |
5274 | */ | 5274 | */ |
5275 | if (dev_priv->quirks & QUIRK_INCREASE_T12_DELAY) { | 5275 | if (dev_priv->quirks & QUIRK_INCREASE_T12_DELAY) { |
5276 | vbt.t11_t12 = max_t(u16, vbt.t11_t12, 900 * 10); | 5276 | vbt.t11_t12 = max_t(u16, vbt.t11_t12, 1300 * 10); |
5277 | DRM_DEBUG_KMS("Increasing T12 panel delay as per the quirk to %d\n", | 5277 | DRM_DEBUG_KMS("Increasing T12 panel delay as per the quirk to %d\n", |
5278 | vbt.t11_t12); | 5278 | vbt.t11_t12); |
5279 | } | 5279 | } |
diff --git a/drivers/gpu/drm/i915/intel_dpll_mgr.c b/drivers/gpu/drm/i915/intel_dpll_mgr.c index a2a3d93d67bd..df808a94c511 100644 --- a/drivers/gpu/drm/i915/intel_dpll_mgr.c +++ b/drivers/gpu/drm/i915/intel_dpll_mgr.c | |||
@@ -1996,7 +1996,7 @@ static void cnl_ddi_pll_enable(struct drm_i915_private *dev_priv, | |||
1996 | 1996 | ||
1997 | /* 3. Configure DPLL_CFGCR0 */ | 1997 | /* 3. Configure DPLL_CFGCR0 */ |
1998 | /* Avoid touch CFGCR1 if HDMI mode is not enabled */ | 1998 | /* Avoid touch CFGCR1 if HDMI mode is not enabled */ |
1999 | if (pll->state.hw_state.cfgcr0 & DPLL_CTRL1_HDMI_MODE(pll->id)) { | 1999 | if (pll->state.hw_state.cfgcr0 & DPLL_CFGCR0_HDMI_MODE) { |
2000 | val = pll->state.hw_state.cfgcr1; | 2000 | val = pll->state.hw_state.cfgcr1; |
2001 | I915_WRITE(CNL_DPLL_CFGCR1(pll->id), val); | 2001 | I915_WRITE(CNL_DPLL_CFGCR1(pll->id), val); |
2002 | /* 4. Reab back to ensure writes completed */ | 2002 | /* 4. Reab back to ensure writes completed */ |
diff --git a/drivers/gpu/drm/i915/intel_engine_cs.c b/drivers/gpu/drm/i915/intel_engine_cs.c index 9ab596941372..3c2d9cf22ed5 100644 --- a/drivers/gpu/drm/i915/intel_engine_cs.c +++ b/drivers/gpu/drm/i915/intel_engine_cs.c | |||
@@ -1048,9 +1048,12 @@ static int bxt_init_workarounds(struct intel_engine_cs *engine) | |||
1048 | } | 1048 | } |
1049 | 1049 | ||
1050 | /* WaProgramL3SqcReg1DefaultForPerf:bxt */ | 1050 | /* WaProgramL3SqcReg1DefaultForPerf:bxt */ |
1051 | if (IS_BXT_REVID(dev_priv, BXT_REVID_B0, REVID_FOREVER)) | 1051 | if (IS_BXT_REVID(dev_priv, BXT_REVID_B0, REVID_FOREVER)) { |
1052 | I915_WRITE(GEN8_L3SQCREG1, L3_GENERAL_PRIO_CREDITS(62) | | 1052 | u32 val = I915_READ(GEN8_L3SQCREG1); |
1053 | L3_HIGH_PRIO_CREDITS(2)); | 1053 | val &= ~L3_PRIO_CREDITS_MASK; |
1054 | val |= L3_GENERAL_PRIO_CREDITS(62) | L3_HIGH_PRIO_CREDITS(2); | ||
1055 | I915_WRITE(GEN8_L3SQCREG1, val); | ||
1056 | } | ||
1054 | 1057 | ||
1055 | /* WaToEnableHwFixForPushConstHWBug:bxt */ | 1058 | /* WaToEnableHwFixForPushConstHWBug:bxt */ |
1056 | if (IS_BXT_REVID(dev_priv, BXT_REVID_C0, REVID_FOREVER)) | 1059 | if (IS_BXT_REVID(dev_priv, BXT_REVID_C0, REVID_FOREVER)) |
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index ed662937ec3c..0a09f8ff6aff 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c | |||
@@ -8245,14 +8245,17 @@ static void gen8_set_l3sqc_credits(struct drm_i915_private *dev_priv, | |||
8245 | int high_prio_credits) | 8245 | int high_prio_credits) |
8246 | { | 8246 | { |
8247 | u32 misccpctl; | 8247 | u32 misccpctl; |
8248 | u32 val; | ||
8248 | 8249 | ||
8249 | /* WaTempDisableDOPClkGating:bdw */ | 8250 | /* WaTempDisableDOPClkGating:bdw */ |
8250 | misccpctl = I915_READ(GEN7_MISCCPCTL); | 8251 | misccpctl = I915_READ(GEN7_MISCCPCTL); |
8251 | I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE); | 8252 | I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE); |
8252 | 8253 | ||
8253 | I915_WRITE(GEN8_L3SQCREG1, | 8254 | val = I915_READ(GEN8_L3SQCREG1); |
8254 | L3_GENERAL_PRIO_CREDITS(general_prio_credits) | | 8255 | val &= ~L3_PRIO_CREDITS_MASK; |
8255 | L3_HIGH_PRIO_CREDITS(high_prio_credits)); | 8256 | val |= L3_GENERAL_PRIO_CREDITS(general_prio_credits); |
8257 | val |= L3_HIGH_PRIO_CREDITS(high_prio_credits); | ||
8258 | I915_WRITE(GEN8_L3SQCREG1, val); | ||
8256 | 8259 | ||
8257 | /* | 8260 | /* |
8258 | * Wait at least 100 clocks before re-enabling clock gating. | 8261 | * Wait at least 100 clocks before re-enabling clock gating. |
diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c index b3a087cb0860..49577eba8e7e 100644 --- a/drivers/gpu/drm/i915/intel_runtime_pm.c +++ b/drivers/gpu/drm/i915/intel_runtime_pm.c | |||
@@ -368,7 +368,7 @@ static void hsw_power_well_enable(struct drm_i915_private *dev_priv, | |||
368 | { | 368 | { |
369 | enum i915_power_well_id id = power_well->id; | 369 | enum i915_power_well_id id = power_well->id; |
370 | bool wait_fuses = power_well->hsw.has_fuses; | 370 | bool wait_fuses = power_well->hsw.has_fuses; |
371 | enum skl_power_gate pg; | 371 | enum skl_power_gate uninitialized_var(pg); |
372 | u32 val; | 372 | u32 val; |
373 | 373 | ||
374 | if (wait_fuses) { | 374 | if (wait_fuses) { |
diff --git a/drivers/gpu/drm/msm/dsi/dsi_host.c b/drivers/gpu/drm/msm/dsi/dsi_host.c index dbb31a014419..deaf869374ea 100644 --- a/drivers/gpu/drm/msm/dsi/dsi_host.c +++ b/drivers/gpu/drm/msm/dsi/dsi_host.c | |||
@@ -248,7 +248,7 @@ disable_clks: | |||
248 | clk_disable_unprepare(ahb_clk); | 248 | clk_disable_unprepare(ahb_clk); |
249 | disable_gdsc: | 249 | disable_gdsc: |
250 | regulator_disable(gdsc_reg); | 250 | regulator_disable(gdsc_reg); |
251 | pm_runtime_put_autosuspend(dev); | 251 | pm_runtime_put_sync(dev); |
252 | put_clk: | 252 | put_clk: |
253 | clk_put(ahb_clk); | 253 | clk_put(ahb_clk); |
254 | put_gdsc: | 254 | put_gdsc: |
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.c index c2bdad88447e..824067d2d427 100644 --- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.c +++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.c | |||
@@ -83,6 +83,8 @@ const struct mdp5_cfg_hw msm8x74v1_config = { | |||
83 | .caps = MDP_LM_CAP_WB }, | 83 | .caps = MDP_LM_CAP_WB }, |
84 | }, | 84 | }, |
85 | .nb_stages = 5, | 85 | .nb_stages = 5, |
86 | .max_width = 2048, | ||
87 | .max_height = 0xFFFF, | ||
86 | }, | 88 | }, |
87 | .dspp = { | 89 | .dspp = { |
88 | .count = 3, | 90 | .count = 3, |
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c index 6fcb58ab718c..440977677001 100644 --- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c +++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c | |||
@@ -804,8 +804,6 @@ static int mdp5_crtc_cursor_set(struct drm_crtc *crtc, | |||
804 | 804 | ||
805 | spin_unlock_irqrestore(&mdp5_crtc->cursor.lock, flags); | 805 | spin_unlock_irqrestore(&mdp5_crtc->cursor.lock, flags); |
806 | 806 | ||
807 | pm_runtime_put_autosuspend(&pdev->dev); | ||
808 | |||
809 | set_cursor: | 807 | set_cursor: |
810 | ret = mdp5_ctl_set_cursor(ctl, pipeline, 0, cursor_enable); | 808 | ret = mdp5_ctl_set_cursor(ctl, pipeline, 0, cursor_enable); |
811 | if (ret) { | 809 | if (ret) { |
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c index f15821a0d900..ea5bb0e1632c 100644 --- a/drivers/gpu/drm/msm/msm_gem.c +++ b/drivers/gpu/drm/msm/msm_gem.c | |||
@@ -610,17 +610,6 @@ int msm_gem_sync_object(struct drm_gem_object *obj, | |||
610 | struct dma_fence *fence; | 610 | struct dma_fence *fence; |
611 | int i, ret; | 611 | int i, ret; |
612 | 612 | ||
613 | if (!exclusive) { | ||
614 | /* NOTE: _reserve_shared() must happen before _add_shared_fence(), | ||
615 | * which makes this a slightly strange place to call it. OTOH this | ||
616 | * is a convenient can-fail point to hook it in. (And similar to | ||
617 | * how etnaviv and nouveau handle this.) | ||
618 | */ | ||
619 | ret = reservation_object_reserve_shared(msm_obj->resv); | ||
620 | if (ret) | ||
621 | return ret; | ||
622 | } | ||
623 | |||
624 | fobj = reservation_object_get_list(msm_obj->resv); | 613 | fobj = reservation_object_get_list(msm_obj->resv); |
625 | if (!fobj || (fobj->shared_count == 0)) { | 614 | if (!fobj || (fobj->shared_count == 0)) { |
626 | fence = reservation_object_get_excl(msm_obj->resv); | 615 | fence = reservation_object_get_excl(msm_obj->resv); |
@@ -1045,10 +1034,10 @@ static void *_msm_gem_kernel_new(struct drm_device *dev, uint32_t size, | |||
1045 | } | 1034 | } |
1046 | 1035 | ||
1047 | vaddr = msm_gem_get_vaddr(obj); | 1036 | vaddr = msm_gem_get_vaddr(obj); |
1048 | if (!vaddr) { | 1037 | if (IS_ERR(vaddr)) { |
1049 | msm_gem_put_iova(obj, aspace); | 1038 | msm_gem_put_iova(obj, aspace); |
1050 | drm_gem_object_unreference(obj); | 1039 | drm_gem_object_unreference(obj); |
1051 | return ERR_PTR(-ENOMEM); | 1040 | return ERR_CAST(vaddr); |
1052 | } | 1041 | } |
1053 | 1042 | ||
1054 | if (bo) | 1043 | if (bo) |
diff --git a/drivers/gpu/drm/msm/msm_gem_submit.c b/drivers/gpu/drm/msm/msm_gem_submit.c index 5d0a75d4b249..93535cac0676 100644 --- a/drivers/gpu/drm/msm/msm_gem_submit.c +++ b/drivers/gpu/drm/msm/msm_gem_submit.c | |||
@@ -221,7 +221,7 @@ fail: | |||
221 | return ret; | 221 | return ret; |
222 | } | 222 | } |
223 | 223 | ||
224 | static int submit_fence_sync(struct msm_gem_submit *submit) | 224 | static int submit_fence_sync(struct msm_gem_submit *submit, bool no_implicit) |
225 | { | 225 | { |
226 | int i, ret = 0; | 226 | int i, ret = 0; |
227 | 227 | ||
@@ -229,6 +229,20 @@ static int submit_fence_sync(struct msm_gem_submit *submit) | |||
229 | struct msm_gem_object *msm_obj = submit->bos[i].obj; | 229 | struct msm_gem_object *msm_obj = submit->bos[i].obj; |
230 | bool write = submit->bos[i].flags & MSM_SUBMIT_BO_WRITE; | 230 | bool write = submit->bos[i].flags & MSM_SUBMIT_BO_WRITE; |
231 | 231 | ||
232 | if (!write) { | ||
233 | /* NOTE: _reserve_shared() must happen before | ||
234 | * _add_shared_fence(), which makes this a slightly | ||
235 | * strange place to call it. OTOH this is a | ||
236 | * convenient can-fail point to hook it in. | ||
237 | */ | ||
238 | ret = reservation_object_reserve_shared(msm_obj->resv); | ||
239 | if (ret) | ||
240 | return ret; | ||
241 | } | ||
242 | |||
243 | if (no_implicit) | ||
244 | continue; | ||
245 | |||
232 | ret = msm_gem_sync_object(&msm_obj->base, submit->gpu->fctx, write); | 246 | ret = msm_gem_sync_object(&msm_obj->base, submit->gpu->fctx, write); |
233 | if (ret) | 247 | if (ret) |
234 | break; | 248 | break; |
@@ -451,11 +465,9 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data, | |||
451 | if (ret) | 465 | if (ret) |
452 | goto out; | 466 | goto out; |
453 | 467 | ||
454 | if (!(args->flags & MSM_SUBMIT_NO_IMPLICIT)) { | 468 | ret = submit_fence_sync(submit, !!(args->flags & MSM_SUBMIT_NO_IMPLICIT)); |
455 | ret = submit_fence_sync(submit); | 469 | if (ret) |
456 | if (ret) | 470 | goto out; |
457 | goto out; | ||
458 | } | ||
459 | 471 | ||
460 | ret = submit_pin_objects(submit); | 472 | ret = submit_pin_objects(submit); |
461 | if (ret) | 473 | if (ret) |
diff --git a/drivers/gpu/drm/msm/msm_gpu.c b/drivers/gpu/drm/msm/msm_gpu.c index ffbff27600e0..6a887032c66a 100644 --- a/drivers/gpu/drm/msm/msm_gpu.c +++ b/drivers/gpu/drm/msm/msm_gpu.c | |||
@@ -718,7 +718,8 @@ void msm_gpu_cleanup(struct msm_gpu *gpu) | |||
718 | msm_gem_put_iova(gpu->rb->bo, gpu->aspace); | 718 | msm_gem_put_iova(gpu->rb->bo, gpu->aspace); |
719 | msm_ringbuffer_destroy(gpu->rb); | 719 | msm_ringbuffer_destroy(gpu->rb); |
720 | } | 720 | } |
721 | if (gpu->aspace) { | 721 | |
722 | if (!IS_ERR_OR_NULL(gpu->aspace)) { | ||
722 | gpu->aspace->mmu->funcs->detach(gpu->aspace->mmu, | 723 | gpu->aspace->mmu->funcs->detach(gpu->aspace->mmu, |
723 | NULL, 0); | 724 | NULL, 0); |
724 | msm_gem_address_space_put(gpu->aspace); | 725 | msm_gem_address_space_put(gpu->aspace); |
diff --git a/drivers/gpu/drm/msm/msm_rd.c b/drivers/gpu/drm/msm/msm_rd.c index 0366b8092f97..ec56794ad039 100644 --- a/drivers/gpu/drm/msm/msm_rd.c +++ b/drivers/gpu/drm/msm/msm_rd.c | |||
@@ -111,10 +111,14 @@ static void rd_write(struct msm_rd_state *rd, const void *buf, int sz) | |||
111 | 111 | ||
112 | wait_event(rd->fifo_event, circ_space(&rd->fifo) > 0); | 112 | wait_event(rd->fifo_event, circ_space(&rd->fifo) > 0); |
113 | 113 | ||
114 | /* Note that smp_load_acquire() is not strictly required | ||
115 | * as CIRC_SPACE_TO_END() does not access the tail more | ||
116 | * than once. | ||
117 | */ | ||
114 | n = min(sz, circ_space_to_end(&rd->fifo)); | 118 | n = min(sz, circ_space_to_end(&rd->fifo)); |
115 | memcpy(fptr, ptr, n); | 119 | memcpy(fptr, ptr, n); |
116 | 120 | ||
117 | fifo->head = (fifo->head + n) & (BUF_SZ - 1); | 121 | smp_store_release(&fifo->head, (fifo->head + n) & (BUF_SZ - 1)); |
118 | sz -= n; | 122 | sz -= n; |
119 | ptr += n; | 123 | ptr += n; |
120 | 124 | ||
@@ -145,13 +149,17 @@ static ssize_t rd_read(struct file *file, char __user *buf, | |||
145 | if (ret) | 149 | if (ret) |
146 | goto out; | 150 | goto out; |
147 | 151 | ||
152 | /* Note that smp_load_acquire() is not strictly required | ||
153 | * as CIRC_CNT_TO_END() does not access the head more than | ||
154 | * once. | ||
155 | */ | ||
148 | n = min_t(int, sz, circ_count_to_end(&rd->fifo)); | 156 | n = min_t(int, sz, circ_count_to_end(&rd->fifo)); |
149 | if (copy_to_user(buf, fptr, n)) { | 157 | if (copy_to_user(buf, fptr, n)) { |
150 | ret = -EFAULT; | 158 | ret = -EFAULT; |
151 | goto out; | 159 | goto out; |
152 | } | 160 | } |
153 | 161 | ||
154 | fifo->tail = (fifo->tail + n) & (BUF_SZ - 1); | 162 | smp_store_release(&fifo->tail, (fifo->tail + n) & (BUF_SZ - 1)); |
155 | *ppos += n; | 163 | *ppos += n; |
156 | 164 | ||
157 | wake_up_all(&rd->fifo_event); | 165 | wake_up_all(&rd->fifo_event); |
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c index f7707849bb53..2b12d82aac15 100644 --- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c +++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c | |||
@@ -223,7 +223,7 @@ void | |||
223 | nouveau_fbcon_accel_save_disable(struct drm_device *dev) | 223 | nouveau_fbcon_accel_save_disable(struct drm_device *dev) |
224 | { | 224 | { |
225 | struct nouveau_drm *drm = nouveau_drm(dev); | 225 | struct nouveau_drm *drm = nouveau_drm(dev); |
226 | if (drm->fbcon) { | 226 | if (drm->fbcon && drm->fbcon->helper.fbdev) { |
227 | drm->fbcon->saved_flags = drm->fbcon->helper.fbdev->flags; | 227 | drm->fbcon->saved_flags = drm->fbcon->helper.fbdev->flags; |
228 | drm->fbcon->helper.fbdev->flags |= FBINFO_HWACCEL_DISABLED; | 228 | drm->fbcon->helper.fbdev->flags |= FBINFO_HWACCEL_DISABLED; |
229 | } | 229 | } |
@@ -233,7 +233,7 @@ void | |||
233 | nouveau_fbcon_accel_restore(struct drm_device *dev) | 233 | nouveau_fbcon_accel_restore(struct drm_device *dev) |
234 | { | 234 | { |
235 | struct nouveau_drm *drm = nouveau_drm(dev); | 235 | struct nouveau_drm *drm = nouveau_drm(dev); |
236 | if (drm->fbcon) { | 236 | if (drm->fbcon && drm->fbcon->helper.fbdev) { |
237 | drm->fbcon->helper.fbdev->flags = drm->fbcon->saved_flags; | 237 | drm->fbcon->helper.fbdev->flags = drm->fbcon->saved_flags; |
238 | } | 238 | } |
239 | } | 239 | } |
@@ -245,7 +245,8 @@ nouveau_fbcon_accel_fini(struct drm_device *dev) | |||
245 | struct nouveau_fbdev *fbcon = drm->fbcon; | 245 | struct nouveau_fbdev *fbcon = drm->fbcon; |
246 | if (fbcon && drm->channel) { | 246 | if (fbcon && drm->channel) { |
247 | console_lock(); | 247 | console_lock(); |
248 | fbcon->helper.fbdev->flags |= FBINFO_HWACCEL_DISABLED; | 248 | if (fbcon->helper.fbdev) |
249 | fbcon->helper.fbdev->flags |= FBINFO_HWACCEL_DISABLED; | ||
249 | console_unlock(); | 250 | console_unlock(); |
250 | nouveau_channel_idle(drm->channel); | 251 | nouveau_channel_idle(drm->channel); |
251 | nvif_object_fini(&fbcon->twod); | 252 | nvif_object_fini(&fbcon->twod); |
diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c index 2dbf62a2ac41..e4751f92b342 100644 --- a/drivers/gpu/drm/nouveau/nv50_display.c +++ b/drivers/gpu/drm/nouveau/nv50_display.c | |||
@@ -3265,11 +3265,14 @@ nv50_mstm = { | |||
3265 | void | 3265 | void |
3266 | nv50_mstm_service(struct nv50_mstm *mstm) | 3266 | nv50_mstm_service(struct nv50_mstm *mstm) |
3267 | { | 3267 | { |
3268 | struct drm_dp_aux *aux = mstm->mgr.aux; | 3268 | struct drm_dp_aux *aux = mstm ? mstm->mgr.aux : NULL; |
3269 | bool handled = true; | 3269 | bool handled = true; |
3270 | int ret; | 3270 | int ret; |
3271 | u8 esi[8] = {}; | 3271 | u8 esi[8] = {}; |
3272 | 3272 | ||
3273 | if (!aux) | ||
3274 | return; | ||
3275 | |||
3273 | while (handled) { | 3276 | while (handled) { |
3274 | ret = drm_dp_dpcd_read(aux, DP_SINK_COUNT_ESI, esi, 8); | 3277 | ret = drm_dp_dpcd_read(aux, DP_SINK_COUNT_ESI, esi, 8); |
3275 | if (ret != 8) { | 3278 | if (ret != 8) { |
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/bsp/g84.c b/drivers/gpu/drm/nouveau/nvkm/engine/bsp/g84.c index 8e2e24a74774..44e116f7880d 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/bsp/g84.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/bsp/g84.c | |||
@@ -39,5 +39,5 @@ int | |||
39 | g84_bsp_new(struct nvkm_device *device, int index, struct nvkm_engine **pengine) | 39 | g84_bsp_new(struct nvkm_device *device, int index, struct nvkm_engine **pengine) |
40 | { | 40 | { |
41 | return nvkm_xtensa_new_(&g84_bsp, device, index, | 41 | return nvkm_xtensa_new_(&g84_bsp, device, index, |
42 | true, 0x103000, pengine); | 42 | device->chipset != 0x92, 0x103000, pengine); |
43 | } | 43 | } |
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c index d06ad2c372bf..455da298227f 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c | |||
@@ -241,6 +241,8 @@ nvkm_vm_unmap_pgt(struct nvkm_vm *vm, int big, u32 fpde, u32 lpde) | |||
241 | mmu->func->map_pgt(vpgd->obj, pde, vpgt->mem); | 241 | mmu->func->map_pgt(vpgd->obj, pde, vpgt->mem); |
242 | } | 242 | } |
243 | 243 | ||
244 | mmu->func->flush(vm); | ||
245 | |||
244 | nvkm_memory_del(&pgt); | 246 | nvkm_memory_del(&pgt); |
245 | } | 247 | } |
246 | } | 248 | } |
diff --git a/drivers/gpu/ipu-v3/ipu-common.c b/drivers/gpu/ipu-v3/ipu-common.c index 6a573d21d3cc..658fa2d3e40c 100644 --- a/drivers/gpu/ipu-v3/ipu-common.c +++ b/drivers/gpu/ipu-v3/ipu-common.c | |||
@@ -405,6 +405,14 @@ int ipu_idmac_lock_enable(struct ipuv3_channel *channel, int num_bursts) | |||
405 | return -EINVAL; | 405 | return -EINVAL; |
406 | } | 406 | } |
407 | 407 | ||
408 | /* | ||
409 | * IPUv3EX / i.MX51 has a different register layout, and on IPUv3M / | ||
410 | * i.MX53 channel arbitration locking doesn't seem to work properly. | ||
411 | * Allow enabling the lock feature on IPUv3H / i.MX6 only. | ||
412 | */ | ||
413 | if (bursts && ipu->ipu_type != IPUV3H) | ||
414 | return -EINVAL; | ||
415 | |||
408 | for (i = 0; i < ARRAY_SIZE(idmac_lock_en_info); i++) { | 416 | for (i = 0; i < ARRAY_SIZE(idmac_lock_en_info); i++) { |
409 | if (channel->num == idmac_lock_en_info[i].chnum) | 417 | if (channel->num == idmac_lock_en_info[i].chnum) |
410 | break; | 418 | break; |
diff --git a/drivers/gpu/ipu-v3/ipu-pre.c b/drivers/gpu/ipu-v3/ipu-pre.c index c35f74c83065..c860a7997cb5 100644 --- a/drivers/gpu/ipu-v3/ipu-pre.c +++ b/drivers/gpu/ipu-v3/ipu-pre.c | |||
@@ -73,6 +73,14 @@ | |||
73 | #define IPU_PRE_STORE_ENG_CTRL_WR_NUM_BYTES(v) ((v & 0x7) << 1) | 73 | #define IPU_PRE_STORE_ENG_CTRL_WR_NUM_BYTES(v) ((v & 0x7) << 1) |
74 | #define IPU_PRE_STORE_ENG_CTRL_OUTPUT_ACTIVE_BPP(v) ((v & 0x3) << 4) | 74 | #define IPU_PRE_STORE_ENG_CTRL_OUTPUT_ACTIVE_BPP(v) ((v & 0x3) << 4) |
75 | 75 | ||
76 | #define IPU_PRE_STORE_ENG_STATUS 0x120 | ||
77 | #define IPU_PRE_STORE_ENG_STATUS_STORE_BLOCK_X_MASK 0xffff | ||
78 | #define IPU_PRE_STORE_ENG_STATUS_STORE_BLOCK_X_SHIFT 0 | ||
79 | #define IPU_PRE_STORE_ENG_STATUS_STORE_BLOCK_Y_MASK 0x3fff | ||
80 | #define IPU_PRE_STORE_ENG_STATUS_STORE_BLOCK_Y_SHIFT 16 | ||
81 | #define IPU_PRE_STORE_ENG_STATUS_STORE_FIFO_FULL (1 << 30) | ||
82 | #define IPU_PRE_STORE_ENG_STATUS_STORE_FIELD (1 << 31) | ||
83 | |||
76 | #define IPU_PRE_STORE_ENG_SIZE 0x130 | 84 | #define IPU_PRE_STORE_ENG_SIZE 0x130 |
77 | #define IPU_PRE_STORE_ENG_SIZE_INPUT_WIDTH(v) ((v & 0xffff) << 0) | 85 | #define IPU_PRE_STORE_ENG_SIZE_INPUT_WIDTH(v) ((v & 0xffff) << 0) |
78 | #define IPU_PRE_STORE_ENG_SIZE_INPUT_HEIGHT(v) ((v & 0xffff) << 16) | 86 | #define IPU_PRE_STORE_ENG_SIZE_INPUT_HEIGHT(v) ((v & 0xffff) << 16) |
@@ -93,6 +101,7 @@ struct ipu_pre { | |||
93 | dma_addr_t buffer_paddr; | 101 | dma_addr_t buffer_paddr; |
94 | void *buffer_virt; | 102 | void *buffer_virt; |
95 | bool in_use; | 103 | bool in_use; |
104 | unsigned int safe_window_end; | ||
96 | }; | 105 | }; |
97 | 106 | ||
98 | static DEFINE_MUTEX(ipu_pre_list_mutex); | 107 | static DEFINE_MUTEX(ipu_pre_list_mutex); |
@@ -160,6 +169,9 @@ void ipu_pre_configure(struct ipu_pre *pre, unsigned int width, | |||
160 | u32 active_bpp = info->cpp[0] >> 1; | 169 | u32 active_bpp = info->cpp[0] >> 1; |
161 | u32 val; | 170 | u32 val; |
162 | 171 | ||
172 | /* calculate safe window for ctrl register updates */ | ||
173 | pre->safe_window_end = height - 2; | ||
174 | |||
163 | writel(bufaddr, pre->regs + IPU_PRE_CUR_BUF); | 175 | writel(bufaddr, pre->regs + IPU_PRE_CUR_BUF); |
164 | writel(bufaddr, pre->regs + IPU_PRE_NEXT_BUF); | 176 | writel(bufaddr, pre->regs + IPU_PRE_NEXT_BUF); |
165 | 177 | ||
@@ -199,7 +211,24 @@ void ipu_pre_configure(struct ipu_pre *pre, unsigned int width, | |||
199 | 211 | ||
200 | void ipu_pre_update(struct ipu_pre *pre, unsigned int bufaddr) | 212 | void ipu_pre_update(struct ipu_pre *pre, unsigned int bufaddr) |
201 | { | 213 | { |
214 | unsigned long timeout = jiffies + msecs_to_jiffies(5); | ||
215 | unsigned short current_yblock; | ||
216 | u32 val; | ||
217 | |||
202 | writel(bufaddr, pre->regs + IPU_PRE_NEXT_BUF); | 218 | writel(bufaddr, pre->regs + IPU_PRE_NEXT_BUF); |
219 | |||
220 | do { | ||
221 | if (time_after(jiffies, timeout)) { | ||
222 | dev_warn(pre->dev, "timeout waiting for PRE safe window\n"); | ||
223 | return; | ||
224 | } | ||
225 | |||
226 | val = readl(pre->regs + IPU_PRE_STORE_ENG_STATUS); | ||
227 | current_yblock = | ||
228 | (val >> IPU_PRE_STORE_ENG_STATUS_STORE_BLOCK_Y_SHIFT) & | ||
229 | IPU_PRE_STORE_ENG_STATUS_STORE_BLOCK_Y_MASK; | ||
230 | } while (current_yblock == 0 || current_yblock >= pre->safe_window_end); | ||
231 | |||
203 | writel(IPU_PRE_CTRL_SDW_UPDATE, pre->regs + IPU_PRE_CTRL_SET); | 232 | writel(IPU_PRE_CTRL_SDW_UPDATE, pre->regs + IPU_PRE_CTRL_SET); |
204 | } | 233 | } |
205 | 234 | ||
diff --git a/drivers/gpu/ipu-v3/ipu-prg.c b/drivers/gpu/ipu-v3/ipu-prg.c index ecc9ea44dc50..0013ca9f72c8 100644 --- a/drivers/gpu/ipu-v3/ipu-prg.c +++ b/drivers/gpu/ipu-v3/ipu-prg.c | |||
@@ -14,6 +14,7 @@ | |||
14 | #include <drm/drm_fourcc.h> | 14 | #include <drm/drm_fourcc.h> |
15 | #include <linux/clk.h> | 15 | #include <linux/clk.h> |
16 | #include <linux/err.h> | 16 | #include <linux/err.h> |
17 | #include <linux/iopoll.h> | ||
17 | #include <linux/mfd/syscon.h> | 18 | #include <linux/mfd/syscon.h> |
18 | #include <linux/mfd/syscon/imx6q-iomuxc-gpr.h> | 19 | #include <linux/mfd/syscon/imx6q-iomuxc-gpr.h> |
19 | #include <linux/module.h> | 20 | #include <linux/module.h> |
@@ -329,6 +330,12 @@ int ipu_prg_channel_configure(struct ipuv3_channel *ipu_chan, | |||
329 | val = IPU_PRG_REG_UPDATE_REG_UPDATE; | 330 | val = IPU_PRG_REG_UPDATE_REG_UPDATE; |
330 | writel(val, prg->regs + IPU_PRG_REG_UPDATE); | 331 | writel(val, prg->regs + IPU_PRG_REG_UPDATE); |
331 | 332 | ||
333 | /* wait for both double buffers to be filled */ | ||
334 | readl_poll_timeout(prg->regs + IPU_PRG_STATUS, val, | ||
335 | (val & IPU_PRG_STATUS_BUFFER0_READY(prg_chan)) && | ||
336 | (val & IPU_PRG_STATUS_BUFFER1_READY(prg_chan)), | ||
337 | 5, 1000); | ||
338 | |||
332 | clk_disable_unprepare(prg->clk_ipg); | 339 | clk_disable_unprepare(prg->clk_ipg); |
333 | 340 | ||
334 | chan->enabled = true; | 341 | chan->enabled = true; |