-rw-r--r--  drivers/gpu/drm/amd/scheduler/gpu_scheduler.c  | 23
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_drv.c        |  8
-rw-r--r--  drivers/gpu/drm/i915/gvt/sched_policy.c        | 22
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c                |  3
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_evict.c          | 63
-rw-r--r--  drivers/gpu/drm/i915/i915_reg.h                |  1
-rw-r--r--  drivers/gpu/drm/i915/intel_ddi.c               |  7
-rw-r--r--  drivers/gpu/drm/i915/intel_dpll_mgr.c          |  2
-rw-r--r--  drivers/gpu/drm/i915/intel_engine_cs.c         |  9
-rw-r--r--  drivers/gpu/drm/i915/intel_pm.c                |  9
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_fbcon.c        |  7
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_display.c         |  5
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/bsp/g84.c  |  2
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c |  2
14 files changed, 90 insertions(+), 73 deletions(-)
diff --git a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
index 97c94f9683fa..38cea6fb25a8 100644
--- a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
+++ b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
@@ -205,32 +205,17 @@ void amd_sched_entity_fini(struct amd_gpu_scheduler *sched,
                            struct amd_sched_entity *entity)
 {
         struct amd_sched_rq *rq = entity->rq;
-        int r;
 
         if (!amd_sched_entity_is_initialized(sched, entity))
                 return;
+
         /**
          * The client will not queue more IBs during this fini, consume existing
-         * queued IBs or discard them on SIGKILL
+         * queued IBs
          */
-        if ((current->flags & PF_SIGNALED) && current->exit_code == SIGKILL)
-                r = -ERESTARTSYS;
-        else
-                r = wait_event_killable(sched->job_scheduled,
-                                        amd_sched_entity_is_idle(entity));
-        amd_sched_rq_remove_entity(rq, entity);
-        if (r) {
-                struct amd_sched_job *job;
+        wait_event(sched->job_scheduled, amd_sched_entity_is_idle(entity));
 
-                /* Park the kernel for a moment to make sure it isn't processing
-                 * our enity.
-                 */
-                kthread_park(sched->thread);
-                kthread_unpark(sched->thread);
-                while (kfifo_out(&entity->job_queue, &job, sizeof(job)))
-                        sched->ops->free_job(job);
-
-        }
+        amd_sched_rq_remove_entity(rq, entity);
         kfifo_free(&entity->job_queue);
 }
 
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.c b/drivers/gpu/drm/exynos/exynos_drm_drv.c
index e651a58c18cf..82b72425a42f 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.c
@@ -168,11 +168,13 @@ static struct drm_driver exynos_drm_driver = {
 static int exynos_drm_suspend(struct device *dev)
 {
         struct drm_device *drm_dev = dev_get_drvdata(dev);
-        struct exynos_drm_private *private = drm_dev->dev_private;
+        struct exynos_drm_private *private;
 
         if (pm_runtime_suspended(dev) || !drm_dev)
                 return 0;
 
+        private = drm_dev->dev_private;
+
         drm_kms_helper_poll_disable(drm_dev);
         exynos_drm_fbdev_suspend(drm_dev);
         private->suspend_state = drm_atomic_helper_suspend(drm_dev);
@@ -188,11 +190,12 @@ static int exynos_drm_suspend(struct device *dev)
 static int exynos_drm_resume(struct device *dev)
 {
         struct drm_device *drm_dev = dev_get_drvdata(dev);
-        struct exynos_drm_private *private = drm_dev->dev_private;
+        struct exynos_drm_private *private;
 
         if (pm_runtime_suspended(dev) || !drm_dev)
                 return 0;
 
+        private = drm_dev->dev_private;
         drm_atomic_helper_resume(drm_dev, private->suspend_state);
         exynos_drm_fbdev_resume(drm_dev);
         drm_kms_helper_poll_enable(drm_dev);
@@ -427,6 +430,7 @@ static void exynos_drm_unbind(struct device *dev)
 
         kfree(drm->dev_private);
         drm->dev_private = NULL;
+        dev_set_drvdata(dev, NULL);
 
         drm_dev_unref(drm);
 }
diff --git a/drivers/gpu/drm/i915/gvt/sched_policy.c b/drivers/gpu/drm/i915/gvt/sched_policy.c
index 436377da41ba..03532dfc0cd5 100644
--- a/drivers/gpu/drm/i915/gvt/sched_policy.c
+++ b/drivers/gpu/drm/i915/gvt/sched_policy.c
@@ -308,20 +308,8 @@ static int tbs_sched_init_vgpu(struct intel_vgpu *vgpu)
 
 static void tbs_sched_clean_vgpu(struct intel_vgpu *vgpu)
 {
-        struct intel_gvt_workload_scheduler *scheduler = &vgpu->gvt->scheduler;
-        int ring_id;
-
         kfree(vgpu->sched_data);
         vgpu->sched_data = NULL;
-
-        spin_lock_bh(&scheduler->mmio_context_lock);
-        for (ring_id = 0; ring_id < I915_NUM_ENGINES; ring_id++) {
-                if (scheduler->engine_owner[ring_id] == vgpu) {
-                        intel_gvt_switch_mmio(vgpu, NULL, ring_id);
-                        scheduler->engine_owner[ring_id] = NULL;
-                }
-        }
-        spin_unlock_bh(&scheduler->mmio_context_lock);
 }
 
 static void tbs_sched_start_schedule(struct intel_vgpu *vgpu)
@@ -388,6 +376,7 @@ void intel_vgpu_stop_schedule(struct intel_vgpu *vgpu)
 {
         struct intel_gvt_workload_scheduler *scheduler =
                 &vgpu->gvt->scheduler;
+        int ring_id;
 
         gvt_dbg_core("vgpu%d: stop schedule\n", vgpu->id);
 
@@ -401,4 +390,13 @@ void intel_vgpu_stop_schedule(struct intel_vgpu *vgpu)
                 scheduler->need_reschedule = true;
                 scheduler->current_vgpu = NULL;
         }
+
+        spin_lock_bh(&scheduler->mmio_context_lock);
+        for (ring_id = 0; ring_id < I915_NUM_ENGINES; ring_id++) {
+                if (scheduler->engine_owner[ring_id] == vgpu) {
+                        intel_gvt_switch_mmio(vgpu, NULL, ring_id);
+                        scheduler->engine_owner[ring_id] = NULL;
+                }
+        }
+        spin_unlock_bh(&scheduler->mmio_context_lock);
 }
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index af289d35b77a..32e857dc507c 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -2657,6 +2657,9 @@ i915_gem_object_pwrite_gtt(struct drm_i915_gem_object *obj,
         if (READ_ONCE(obj->mm.pages))
                 return -ENODEV;
 
+        if (obj->mm.madv != I915_MADV_WILLNEED)
+                return -EFAULT;
+
         /* Before the pages are instantiated the object is treated as being
          * in the CPU domain. The pages will be clflushed as required before
          * use, and we can freely write into the pages directly. If userspace
diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c
index 4df039ef2ce3..e161d383b526 100644
--- a/drivers/gpu/drm/i915/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/i915_gem_evict.c
@@ -33,21 +33,20 @@
 #include "intel_drv.h"
 #include "i915_trace.h"
 
-static bool ggtt_is_idle(struct drm_i915_private *dev_priv)
+static bool ggtt_is_idle(struct drm_i915_private *i915)
 {
-        struct i915_ggtt *ggtt = &dev_priv->ggtt;
-        struct intel_engine_cs *engine;
-        enum intel_engine_id id;
+        struct intel_engine_cs *engine;
+        enum intel_engine_id id;
 
-        for_each_engine(engine, dev_priv, id) {
-                struct intel_timeline *tl;
+        if (i915->gt.active_requests)
+                return false;
 
-                tl = &ggtt->base.timeline.engine[engine->id];
-                if (i915_gem_active_isset(&tl->last_request))
+        for_each_engine(engine, i915, id) {
+                if (engine->last_retired_context != i915->kernel_context)
                         return false;
         }
 
         return true;
 }
 
 static int ggtt_flush(struct drm_i915_private *i915)
@@ -157,7 +156,8 @@ i915_gem_evict_something(struct i915_address_space *vm,
                                     min_size, alignment, cache_level,
                                     start, end, mode);
 
-        /* Retire before we search the active list. Although we have
+        /*
+         * Retire before we search the active list. Although we have
          * reasonable accuracy in our retirement lists, we may have
          * a stray pin (preventing eviction) that can only be resolved by
          * retiring.
@@ -182,7 +182,8 @@ search_again:
                 BUG_ON(ret);
         }
 
-        /* Can we unpin some objects such as idle hw contents,
+        /*
+         * Can we unpin some objects such as idle hw contents,
          * or pending flips? But since only the GGTT has global entries
          * such as scanouts, rinbuffers and contexts, we can skip the
          * purge when inspecting per-process local address spaces.
@@ -190,19 +191,33 @@ search_again:
         if (!i915_is_ggtt(vm) || flags & PIN_NONBLOCK)
                 return -ENOSPC;
 
-        if (ggtt_is_idle(dev_priv)) {
-                /* If we still have pending pageflip completions, drop
-                 * back to userspace to give our workqueues time to
-                 * acquire our locks and unpin the old scanouts.
-                 */
-                return intel_has_pending_fb_unpin(dev_priv) ? -EAGAIN : -ENOSPC;
-        }
+        /*
+         * Not everything in the GGTT is tracked via VMA using
+         * i915_vma_move_to_active(), otherwise we could evict as required
+         * with minimal stalling. Instead we are forced to idle the GPU and
+         * explicitly retire outstanding requests which will then remove
+         * the pinning for active objects such as contexts and ring,
+         * enabling us to evict them on the next iteration.
+         *
+         * To ensure that all user contexts are evictable, we perform
+         * a switch to the perma-pinned kernel context. This all also gives
+         * us a termination condition, when the last retired context is
+         * the kernel's there is no more we can evict.
+         */
+        if (!ggtt_is_idle(dev_priv)) {
+                ret = ggtt_flush(dev_priv);
+                if (ret)
+                        return ret;
 
-        ret = ggtt_flush(dev_priv);
-        if (ret)
-                return ret;
+                goto search_again;
+        }
 
-        goto search_again;
+        /*
+         * If we still have pending pageflip completions, drop
+         * back to userspace to give our workqueues time to
+         * acquire our locks and unpin the old scanouts.
+         */
+        return intel_has_pending_fb_unpin(dev_priv) ? -EAGAIN : -ENOSPC;
 
 found:
         /* drm_mm doesn't allow any other other operations while
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index ed7cd9ee2c2a..c9bcc6c45012 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -6998,6 +6998,7 @@ enum {
  */
 #define L3_GENERAL_PRIO_CREDITS(x) (((x) >> 1) << 19)
 #define L3_HIGH_PRIO_CREDITS(x) (((x) >> 1) << 14)
+#define L3_PRIO_CREDITS_MASK ((0x1f << 19) | (0x1f << 14))
 
 #define GEN7_L3CNTLREG1 _MMIO(0xB01C)
 #define GEN7_WA_FOR_GEN7_L3_CONTROL 0x3C47FF8C
diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c
index 476681d5940c..5e5fe03b638c 100644
--- a/drivers/gpu/drm/i915/intel_ddi.c
+++ b/drivers/gpu/drm/i915/intel_ddi.c
@@ -664,8 +664,8 @@ intel_ddi_get_buf_trans_fdi(struct drm_i915_private *dev_priv,
                             int *n_entries)
 {
         if (IS_BROADWELL(dev_priv)) {
-                *n_entries = ARRAY_SIZE(hsw_ddi_translations_fdi);
-                return hsw_ddi_translations_fdi;
+                *n_entries = ARRAY_SIZE(bdw_ddi_translations_fdi);
+                return bdw_ddi_translations_fdi;
         } else if (IS_HASWELL(dev_priv)) {
                 *n_entries = ARRAY_SIZE(hsw_ddi_translations_fdi);
                 return hsw_ddi_translations_fdi;
@@ -2102,8 +2102,7 @@ static void intel_ddi_clk_select(struct intel_encoder *encoder,
                  * register writes.
                  */
                 val = I915_READ(DPCLKA_CFGCR0);
-                val &= ~(DPCLKA_CFGCR0_DDI_CLK_OFF(port) |
-                         DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(port));
+                val &= ~DPCLKA_CFGCR0_DDI_CLK_OFF(port);
                 I915_WRITE(DPCLKA_CFGCR0, val);
         } else if (IS_GEN9_BC(dev_priv)) {
                 /* DDI -> PLL mapping */
diff --git a/drivers/gpu/drm/i915/intel_dpll_mgr.c b/drivers/gpu/drm/i915/intel_dpll_mgr.c
index a2a3d93d67bd..df808a94c511 100644
--- a/drivers/gpu/drm/i915/intel_dpll_mgr.c
+++ b/drivers/gpu/drm/i915/intel_dpll_mgr.c
@@ -1996,7 +1996,7 @@ static void cnl_ddi_pll_enable(struct drm_i915_private *dev_priv,
 
         /* 3. Configure DPLL_CFGCR0 */
         /* Avoid touch CFGCR1 if HDMI mode is not enabled */
-        if (pll->state.hw_state.cfgcr0 & DPLL_CTRL1_HDMI_MODE(pll->id)) {
+        if (pll->state.hw_state.cfgcr0 & DPLL_CFGCR0_HDMI_MODE) {
                 val = pll->state.hw_state.cfgcr1;
                 I915_WRITE(CNL_DPLL_CFGCR1(pll->id), val);
                 /* 4. Reab back to ensure writes completed */
diff --git a/drivers/gpu/drm/i915/intel_engine_cs.c b/drivers/gpu/drm/i915/intel_engine_cs.c
index 9ab596941372..3c2d9cf22ed5 100644
--- a/drivers/gpu/drm/i915/intel_engine_cs.c
+++ b/drivers/gpu/drm/i915/intel_engine_cs.c
@@ -1048,9 +1048,12 @@ static int bxt_init_workarounds(struct intel_engine_cs *engine)
         }
 
         /* WaProgramL3SqcReg1DefaultForPerf:bxt */
-        if (IS_BXT_REVID(dev_priv, BXT_REVID_B0, REVID_FOREVER))
-                I915_WRITE(GEN8_L3SQCREG1, L3_GENERAL_PRIO_CREDITS(62) |
-                                           L3_HIGH_PRIO_CREDITS(2));
+        if (IS_BXT_REVID(dev_priv, BXT_REVID_B0, REVID_FOREVER)) {
+                u32 val = I915_READ(GEN8_L3SQCREG1);
+                val &= ~L3_PRIO_CREDITS_MASK;
+                val |= L3_GENERAL_PRIO_CREDITS(62) | L3_HIGH_PRIO_CREDITS(2);
+                I915_WRITE(GEN8_L3SQCREG1, val);
+        }
 
         /* WaToEnableHwFixForPushConstHWBug:bxt */
         if (IS_BXT_REVID(dev_priv, BXT_REVID_C0, REVID_FOREVER))
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index ed662937ec3c..0a09f8ff6aff 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -8245,14 +8245,17 @@ static void gen8_set_l3sqc_credits(struct drm_i915_private *dev_priv,
                                    int high_prio_credits)
 {
         u32 misccpctl;
+        u32 val;
 
         /* WaTempDisableDOPClkGating:bdw */
         misccpctl = I915_READ(GEN7_MISCCPCTL);
         I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
 
-        I915_WRITE(GEN8_L3SQCREG1,
-                   L3_GENERAL_PRIO_CREDITS(general_prio_credits) |
-                   L3_HIGH_PRIO_CREDITS(high_prio_credits));
+        val = I915_READ(GEN8_L3SQCREG1);
+        val &= ~L3_PRIO_CREDITS_MASK;
+        val |= L3_GENERAL_PRIO_CREDITS(general_prio_credits);
+        val |= L3_HIGH_PRIO_CREDITS(high_prio_credits);
+        I915_WRITE(GEN8_L3SQCREG1, val);
 
         /*
          * Wait at least 100 clocks before re-enabling clock gating.
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
index f7707849bb53..2b12d82aac15 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
@@ -223,7 +223,7 @@ void
 nouveau_fbcon_accel_save_disable(struct drm_device *dev)
 {
         struct nouveau_drm *drm = nouveau_drm(dev);
-        if (drm->fbcon) {
+        if (drm->fbcon && drm->fbcon->helper.fbdev) {
                 drm->fbcon->saved_flags = drm->fbcon->helper.fbdev->flags;
                 drm->fbcon->helper.fbdev->flags |= FBINFO_HWACCEL_DISABLED;
         }
@@ -233,7 +233,7 @@ void
 nouveau_fbcon_accel_restore(struct drm_device *dev)
 {
         struct nouveau_drm *drm = nouveau_drm(dev);
-        if (drm->fbcon) {
+        if (drm->fbcon && drm->fbcon->helper.fbdev) {
                 drm->fbcon->helper.fbdev->flags = drm->fbcon->saved_flags;
         }
 }
@@ -245,7 +245,8 @@ nouveau_fbcon_accel_fini(struct drm_device *dev)
         struct nouveau_fbdev *fbcon = drm->fbcon;
         if (fbcon && drm->channel) {
                 console_lock();
-                fbcon->helper.fbdev->flags |= FBINFO_HWACCEL_DISABLED;
+                if (fbcon->helper.fbdev)
+                        fbcon->helper.fbdev->flags |= FBINFO_HWACCEL_DISABLED;
                 console_unlock();
                 nouveau_channel_idle(drm->channel);
                 nvif_object_fini(&fbcon->twod);
diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c
index 2dbf62a2ac41..e4751f92b342 100644
--- a/drivers/gpu/drm/nouveau/nv50_display.c
+++ b/drivers/gpu/drm/nouveau/nv50_display.c
@@ -3265,11 +3265,14 @@ nv50_mstm = {
 void
 nv50_mstm_service(struct nv50_mstm *mstm)
 {
-        struct drm_dp_aux *aux = mstm->mgr.aux;
+        struct drm_dp_aux *aux = mstm ? mstm->mgr.aux : NULL;
         bool handled = true;
         int ret;
         u8 esi[8] = {};
 
+        if (!aux)
+                return;
+
         while (handled) {
                 ret = drm_dp_dpcd_read(aux, DP_SINK_COUNT_ESI, esi, 8);
                 if (ret != 8) {
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/bsp/g84.c b/drivers/gpu/drm/nouveau/nvkm/engine/bsp/g84.c
index 8e2e24a74774..44e116f7880d 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/bsp/g84.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/bsp/g84.c
@@ -39,5 +39,5 @@ int
 g84_bsp_new(struct nvkm_device *device, int index, struct nvkm_engine **pengine)
 {
         return nvkm_xtensa_new_(&g84_bsp, device, index,
-                                true, 0x103000, pengine);
+                                device->chipset != 0x92, 0x103000, pengine);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c
index d06ad2c372bf..455da298227f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c
@@ -241,6 +241,8 @@ nvkm_vm_unmap_pgt(struct nvkm_vm *vm, int big, u32 fpde, u32 lpde)
                         mmu->func->map_pgt(vpgd->obj, pde, vpgt->mem);
                 }
 
+                mmu->func->flush(vm);
+
                 nvkm_memory_del(&pgt);
         }
 }