author    Linus Torvalds <torvalds@linux-foundation.org>    2018-02-08 14:42:05 -0500
committer Linus Torvalds <torvalds@linux-foundation.org>    2018-02-08 14:42:05 -0500
commit    fe26adf431a58d620819618e52a10bf9b5cfde52 (patch)
tree      7e5fd39db303f0291f24c6054f51f8a1121bd587
parent    9e95dae76b53e67b64bb8e8468d2285b1dc34720 (diff)
parent    94fc27ac487a80daf42f97b1a0503d029f3c1325 (diff)
Merge tag 'drm-for-v4.16-part2-fixes' of git://people.freedesktop.org/~airlied/linux
Pull more drm updates from Dave Airlie:
 "Ben missed sending his nouveau tree, but he really didn't have much
  stuff in it:

   - GP108 acceleration support is enabled by "secure boot" support

   - some clockgating work on Kepler, and bunch of fixes

   - the bulk of the diff is regenerated firmware files, the change to
     them really isn't that large.

  Otherwise this contains regular Intel and AMDGPU fixes"

* tag 'drm-for-v4.16-part2-fixes' of git://people.freedesktop.org/~airlied/linux: (59 commits)
  drm/i915/bios: add DP max link rate to VBT child device struct
  drm/i915/cnp: Properly handle VBT ddc pin out of bounds.
  drm/i915/cnp: Ignore VBT request for know invalid DDC pin.
  drm/i915/cmdparser: Do not check past the cmd length.
  drm/i915/cmdparser: Check reg_table_count before derefencing.
  drm/i915/bxt, glk: Increase PCODE timeouts during CDCLK freq changing
  drm/i915/gvt: Use KVM r/w to access guest opregion
  drm/i915/gvt: Fix aperture read/write emulation when enable x-no-mmap=on
  drm/i915/gvt: only reset execlist state of one engine during VM engine reset
  drm/i915/gvt: refine intel_vgpu_submission_ops as per engine ops
  drm/amdgpu: re-enable CGCG on CZ and disable on ST
  drm/nouveau/clk: fix gcc-7 -Wint-in-bool-context warning
  drm/nouveau/mmu: Fix trailing semicolon
  drm/nouveau: Introduce NvPmEnableGating option
  drm/nouveau: Add support for SLCG for Kepler2
  drm/nouveau: Add support for BLCG on Kepler2
  drm/nouveau: Add support for BLCG on Kepler1
  drm/nouveau: Add support for basic clockgating on Kepler1
  drm/nouveau/kms/nv50: fix handling of gamma since atomic conversion
  drm/nouveau/kms/nv50: use INTERPOLATE_257_UNITY_RANGE LUT on newer chipsets
  ...
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c | 8
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 32
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vega10_ih.c | 6
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vi.c | 1
-rw-r--r--  drivers/gpu/drm/i915/gvt/cfg_space.c | 15
-rw-r--r--  drivers/gpu/drm/i915/gvt/dmabuf.c | 1
-rw-r--r--  drivers/gpu/drm/i915/gvt/execlist.c | 22
-rw-r--r--  drivers/gpu/drm/i915/gvt/gtt.c | 24
-rw-r--r--  drivers/gpu/drm/i915/gvt/gvt.h | 6
-rw-r--r--  drivers/gpu/drm/i915/gvt/handlers.c | 10
-rw-r--r--  drivers/gpu/drm/i915/gvt/hypercall.h | 1
-rw-r--r--  drivers/gpu/drm/i915/gvt/kvmgt.c | 52
-rw-r--r--  drivers/gpu/drm/i915/gvt/mmio.c | 42
-rw-r--r--  drivers/gpu/drm/i915/gvt/mmio_context.c | 10
-rw-r--r--  drivers/gpu/drm/i915/gvt/mpt.h | 17
-rw-r--r--  drivers/gpu/drm/i915/gvt/opregion.c | 98
-rw-r--r--  drivers/gpu/drm/i915/gvt/sched_policy.c | 21
-rw-r--r--  drivers/gpu/drm/i915/gvt/scheduler.c | 19
-rw-r--r--  drivers/gpu/drm/i915/gvt/scheduler.h | 1
-rw-r--r--  drivers/gpu/drm/i915/gvt/vgpu.c | 5
-rw-r--r--  drivers/gpu/drm/i915/i915_cmd_parser.c | 10
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.c | 2
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.h | 6
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c | 9
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_gtt.c | 61
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_request.c | 2
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_shrinker.c | 4
-rw-r--r--  drivers/gpu/drm/i915/i915_pci.c | 94
-rw-r--r--  drivers/gpu/drm/i915/i915_reg.h | 2
-rw-r--r--  drivers/gpu/drm/i915/intel_audio.c | 2
-rw-r--r--  drivers/gpu/drm/i915/intel_bios.c | 20
-rw-r--r--  drivers/gpu/drm/i915/intel_breadcrumbs.c | 11
-rw-r--r--  drivers/gpu/drm/i915/intel_cdclk.c | 22
-rw-r--r--  drivers/gpu/drm/i915/intel_display.c | 6
-rw-r--r--  drivers/gpu/drm/i915/intel_dp_link_training.c | 26
-rw-r--r--  drivers/gpu/drm/i915/intel_engine_cs.c | 16
-rw-r--r--  drivers/gpu/drm/i915/intel_guc_fw.c | 9
-rw-r--r--  drivers/gpu/drm/i915/intel_hangcheck.c | 7
-rw-r--r--  drivers/gpu/drm/i915/intel_hdmi.c | 14
-rw-r--r--  drivers/gpu/drm/i915/intel_huc.c | 11
-rw-r--r--  drivers/gpu/drm/i915/intel_pm.c | 6
-rw-r--r--  drivers/gpu/drm/i915/intel_uc.c | 2
-rw-r--r--  drivers/gpu/drm/i915/intel_vbt_defs.h | 2
-rw-r--r--  drivers/gpu/drm/nouveau/include/nvkm/subdev/fb.h | 1
-rw-r--r--  drivers/gpu/drm/nouveau/include/nvkm/subdev/secboot.h | 1
-rw-r--r--  drivers/gpu/drm/nouveau/include/nvkm/subdev/therm.h | 17
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_bo.h | 28
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_crtc.h | 1
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_fbcon.c | 22
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_display.c | 189
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/device/base.c | 30
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h | 1
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/gr/gk104.c | 207
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/gr/gk104.h | 55
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110.c | 155
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/pm/base.c | 6
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue.c | 1
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/clk/gt215.c | 9
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/fb/Kbuild | 1
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/fb/gf100.c | 6
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/fb/gk104.c | 47
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/fb/gk104.h | 35
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/fb/gk110.c | 71
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/fb/priv.h | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/mmu/uvmm.c | 3
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gf100.fuc3.h | 746
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gk208.fuc5.h | 802
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gt215.fuc3.h | 1006
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/memx.fuc | 30
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/secboot/Kbuild | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr.h | 1
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r370.c | 144
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r370.h | 49
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r375.c | 93
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gp102.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gp108.c | 67
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/secboot/priv.h | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/therm/Kbuild | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/therm/base.c | 70
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/therm/gf100.c | 58
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/therm/gf100.h | 35
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/therm/gf119.c | 8
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/therm/gk104.c | 136
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/therm/gk104.h | 48
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/therm/gt215.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/therm/priv.h | 21
-rw-r--r--  drivers/gpu/drm/radeon/radeon_uvd.c | 2
-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo.c | 2
-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo_vm.c | 3
90 files changed, 3168 insertions, 1788 deletions
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
index bb40d2529a30..239bf2a4b3c6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
@@ -179,8 +179,12 @@ static int amdgpu_gfx_kiq_acquire(struct amdgpu_device *adev,
179 179
180 amdgpu_gfx_bit_to_queue(adev, queue_bit, &mec, &pipe, &queue); 180 amdgpu_gfx_bit_to_queue(adev, queue_bit, &mec, &pipe, &queue);
181 181
182 /* Using pipes 2/3 from MEC 2 seems cause problems */ 182 /*
183 if (mec == 1 && pipe > 1) 183 * 1. Using pipes 2/3 from MEC 2 seems cause problems.
184 * 2. It must use queue id 0, because CGPG_IDLE/SAVE/LOAD/RUN
185 * only can be issued on queue 0.
186 */
187 if ((mec == 1 && pipe > 1) || queue != 0)
184 continue; 188 continue;
185 189
186 ring->me = mec + 1; 190 ring->me = mec + 1;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index 6fc16eecf2dc..5afbc5e714d0 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -2262,12 +2262,12 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
2262{ 2262{
2263 const unsigned align = min(AMDGPU_VM_PTB_ALIGN_SIZE, 2263 const unsigned align = min(AMDGPU_VM_PTB_ALIGN_SIZE,
2264 AMDGPU_VM_PTE_COUNT(adev) * 8); 2264 AMDGPU_VM_PTE_COUNT(adev) * 8);
2265 uint64_t init_pde_value = 0, flags;
2265 unsigned ring_instance; 2266 unsigned ring_instance;
2266 struct amdgpu_ring *ring; 2267 struct amdgpu_ring *ring;
2267 struct drm_sched_rq *rq; 2268 struct drm_sched_rq *rq;
2269 unsigned long size;
2268 int r, i; 2270 int r, i;
2269 u64 flags;
2270 uint64_t init_pde_value = 0;
2271 2271
2272 vm->va = RB_ROOT_CACHED; 2272 vm->va = RB_ROOT_CACHED;
2273 for (i = 0; i < AMDGPU_MAX_VMHUBS; i++) 2273 for (i = 0; i < AMDGPU_MAX_VMHUBS; i++)
@@ -2318,29 +2318,21 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
2318 flags |= (AMDGPU_GEM_CREATE_NO_CPU_ACCESS | 2318 flags |= (AMDGPU_GEM_CREATE_NO_CPU_ACCESS |
2319 AMDGPU_GEM_CREATE_SHADOW); 2319 AMDGPU_GEM_CREATE_SHADOW);
2320 2320
2321 r = amdgpu_bo_create(adev, 2321 size = amdgpu_vm_bo_size(adev, adev->vm_manager.root_level);
2322 amdgpu_vm_bo_size(adev, adev->vm_manager.root_level), 2322 r = amdgpu_bo_create(adev, size, align, true, AMDGPU_GEM_DOMAIN_VRAM,
2323 align, true, 2323 flags, NULL, NULL, init_pde_value,
2324 AMDGPU_GEM_DOMAIN_VRAM, 2324 &vm->root.base.bo);
2325 flags,
2326 NULL, NULL, init_pde_value, &vm->root.base.bo);
2327 if (r) 2325 if (r)
2328 goto error_free_sched_entity; 2326 goto error_free_sched_entity;
2329 2327
2328 r = amdgpu_bo_reserve(vm->root.base.bo, true);
2329 if (r)
2330 goto error_free_root;
2331
2330 vm->root.base.vm = vm; 2332 vm->root.base.vm = vm;
2331 list_add_tail(&vm->root.base.bo_list, &vm->root.base.bo->va); 2333 list_add_tail(&vm->root.base.bo_list, &vm->root.base.bo->va);
2332 INIT_LIST_HEAD(&vm->root.base.vm_status); 2334 list_add_tail(&vm->root.base.vm_status, &vm->evicted);
2333 2335 amdgpu_bo_unreserve(vm->root.base.bo);
2334 if (vm->use_cpu_for_update) {
2335 r = amdgpu_bo_reserve(vm->root.base.bo, false);
2336 if (r)
2337 goto error_free_root;
2338
2339 r = amdgpu_bo_kmap(vm->root.base.bo, NULL);
2340 amdgpu_bo_unreserve(vm->root.base.bo);
2341 if (r)
2342 goto error_free_root;
2343 }
2344 2336
2345 if (pasid) { 2337 if (pasid) {
2346 unsigned long flags; 2338 unsigned long flags;
diff --git a/drivers/gpu/drm/amd/amdgpu/vega10_ih.c b/drivers/gpu/drm/amd/amdgpu/vega10_ih.c
index b69ceafb7888..ee14d78be2a9 100644
--- a/drivers/gpu/drm/amd/amdgpu/vega10_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/vega10_ih.c
@@ -278,9 +278,9 @@ static bool vega10_ih_prescreen_iv(struct amdgpu_device *adev)
278 /* Track retry faults in per-VM fault FIFO. */ 278 /* Track retry faults in per-VM fault FIFO. */
279 spin_lock(&adev->vm_manager.pasid_lock); 279 spin_lock(&adev->vm_manager.pasid_lock);
280 vm = idr_find(&adev->vm_manager.pasid_idr, pasid); 280 vm = idr_find(&adev->vm_manager.pasid_idr, pasid);
281 spin_unlock(&adev->vm_manager.pasid_lock); 281 if (!vm) {
282 if (WARN_ON_ONCE(!vm)) {
283 /* VM not found, process it normally */ 282 /* VM not found, process it normally */
283 spin_unlock(&adev->vm_manager.pasid_lock);
284 amdgpu_ih_clear_fault(adev, key); 284 amdgpu_ih_clear_fault(adev, key);
285 return true; 285 return true;
286 } 286 }
@@ -288,9 +288,11 @@ static bool vega10_ih_prescreen_iv(struct amdgpu_device *adev)
288 r = kfifo_put(&vm->faults, key); 288 r = kfifo_put(&vm->faults, key);
289 if (!r) { 289 if (!r) {
290 /* FIFO is full. Ignore it until there is space */ 290 /* FIFO is full. Ignore it until there is space */
291 spin_unlock(&adev->vm_manager.pasid_lock);
291 amdgpu_ih_clear_fault(adev, key); 292 amdgpu_ih_clear_fault(adev, key);
292 goto ignore_iv; 293 goto ignore_iv;
293 } 294 }
295 spin_unlock(&adev->vm_manager.pasid_lock);
294 296
295 /* It's the first fault for this address, process it normally */ 297 /* It's the first fault for this address, process it normally */
296 return true; 298 return true;
diff --git a/drivers/gpu/drm/amd/amdgpu/vi.c b/drivers/gpu/drm/amd/amdgpu/vi.c
index da2b99c2d95f..1e3e05a11f7a 100644
--- a/drivers/gpu/drm/amd/amdgpu/vi.c
+++ b/drivers/gpu/drm/amd/amdgpu/vi.c
@@ -1049,7 +1049,6 @@ static int vi_common_early_init(void *handle)
1049 AMD_CG_SUPPORT_GFX_CP_LS | 1049 AMD_CG_SUPPORT_GFX_CP_LS |
1050 AMD_CG_SUPPORT_GFX_CGTS | 1050 AMD_CG_SUPPORT_GFX_CGTS |
1051 AMD_CG_SUPPORT_GFX_CGTS_LS | 1051 AMD_CG_SUPPORT_GFX_CGTS_LS |
1052 AMD_CG_SUPPORT_GFX_CGCG |
1053 AMD_CG_SUPPORT_GFX_CGLS | 1052 AMD_CG_SUPPORT_GFX_CGLS |
1054 AMD_CG_SUPPORT_BIF_LS | 1053 AMD_CG_SUPPORT_BIF_LS |
1055 AMD_CG_SUPPORT_HDP_MGCG | 1054 AMD_CG_SUPPORT_HDP_MGCG |
diff --git a/drivers/gpu/drm/i915/gvt/cfg_space.c b/drivers/gpu/drm/i915/gvt/cfg_space.c
index 97bfc00d2a82..c62346fdc05d 100644
--- a/drivers/gpu/drm/i915/gvt/cfg_space.c
+++ b/drivers/gpu/drm/i915/gvt/cfg_space.c
@@ -119,16 +119,6 @@ static int map_aperture(struct intel_vgpu *vgpu, bool map)
119 if (map == vgpu->cfg_space.bar[INTEL_GVT_PCI_BAR_APERTURE].tracked) 119 if (map == vgpu->cfg_space.bar[INTEL_GVT_PCI_BAR_APERTURE].tracked)
120 return 0; 120 return 0;
121 121
122 if (map) {
123 vgpu->gm.aperture_va = memremap(aperture_pa, aperture_sz,
124 MEMREMAP_WC);
125 if (!vgpu->gm.aperture_va)
126 return -ENOMEM;
127 } else {
128 memunmap(vgpu->gm.aperture_va);
129 vgpu->gm.aperture_va = NULL;
130 }
131
132 val = vgpu_cfg_space(vgpu)[PCI_BASE_ADDRESS_2]; 122 val = vgpu_cfg_space(vgpu)[PCI_BASE_ADDRESS_2];
133 if (val & PCI_BASE_ADDRESS_MEM_TYPE_64) 123 if (val & PCI_BASE_ADDRESS_MEM_TYPE_64)
134 val = *(u64 *)(vgpu_cfg_space(vgpu) + PCI_BASE_ADDRESS_2); 124 val = *(u64 *)(vgpu_cfg_space(vgpu) + PCI_BASE_ADDRESS_2);
@@ -141,11 +131,8 @@ static int map_aperture(struct intel_vgpu *vgpu, bool map)
141 aperture_pa >> PAGE_SHIFT, 131 aperture_pa >> PAGE_SHIFT,
142 aperture_sz >> PAGE_SHIFT, 132 aperture_sz >> PAGE_SHIFT,
143 map); 133 map);
144 if (ret) { 134 if (ret)
145 memunmap(vgpu->gm.aperture_va);
146 vgpu->gm.aperture_va = NULL;
147 return ret; 135 return ret;
148 }
149 136
150 vgpu->cfg_space.bar[INTEL_GVT_PCI_BAR_APERTURE].tracked = map; 137 vgpu->cfg_space.bar[INTEL_GVT_PCI_BAR_APERTURE].tracked = map;
151 return 0; 138 return 0;
diff --git a/drivers/gpu/drm/i915/gvt/dmabuf.c b/drivers/gpu/drm/i915/gvt/dmabuf.c
index 2ab584f97dfb..2fb7b34ef561 100644
--- a/drivers/gpu/drm/i915/gvt/dmabuf.c
+++ b/drivers/gpu/drm/i915/gvt/dmabuf.c
@@ -472,7 +472,6 @@ int intel_vgpu_get_dmabuf(struct intel_vgpu *vgpu, unsigned int dmabuf_id)
472 ret = PTR_ERR(dmabuf); 472 ret = PTR_ERR(dmabuf);
473 goto out_free_gem; 473 goto out_free_gem;
474 } 474 }
475 obj->base.dma_buf = dmabuf;
476 475
477 i915_gem_object_put(obj); 476 i915_gem_object_put(obj);
478 477
diff --git a/drivers/gpu/drm/i915/gvt/execlist.c b/drivers/gpu/drm/i915/gvt/execlist.c
index 769c1c24ae75..70494e394d2c 100644
--- a/drivers/gpu/drm/i915/gvt/execlist.c
+++ b/drivers/gpu/drm/i915/gvt/execlist.c
@@ -521,24 +521,23 @@ static void init_vgpu_execlist(struct intel_vgpu *vgpu, int ring_id)
521 521
522 ctx_status_ptr_reg = execlist_ring_mmio(vgpu->gvt, ring_id, 522 ctx_status_ptr_reg = execlist_ring_mmio(vgpu->gvt, ring_id,
523 _EL_OFFSET_STATUS_PTR); 523 _EL_OFFSET_STATUS_PTR);
524
525 ctx_status_ptr.dw = vgpu_vreg(vgpu, ctx_status_ptr_reg); 524 ctx_status_ptr.dw = vgpu_vreg(vgpu, ctx_status_ptr_reg);
526 ctx_status_ptr.read_ptr = 0; 525 ctx_status_ptr.read_ptr = 0;
527 ctx_status_ptr.write_ptr = 0x7; 526 ctx_status_ptr.write_ptr = 0x7;
528 vgpu_vreg(vgpu, ctx_status_ptr_reg) = ctx_status_ptr.dw; 527 vgpu_vreg(vgpu, ctx_status_ptr_reg) = ctx_status_ptr.dw;
529} 528}
530 529
531static void clean_execlist(struct intel_vgpu *vgpu) 530static void clean_execlist(struct intel_vgpu *vgpu, unsigned long engine_mask)
532{ 531{
533 enum intel_engine_id i; 532 unsigned int tmp;
533 struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
534 struct intel_engine_cs *engine; 534 struct intel_engine_cs *engine;
535 struct intel_vgpu_submission *s = &vgpu->submission;
535 536
536 for_each_engine(engine, vgpu->gvt->dev_priv, i) { 537 for_each_engine_masked(engine, dev_priv, engine_mask, tmp) {
537 struct intel_vgpu_submission *s = &vgpu->submission; 538 kfree(s->ring_scan_buffer[engine->id]);
538 539 s->ring_scan_buffer[engine->id] = NULL;
539 kfree(s->ring_scan_buffer[i]); 540 s->ring_scan_buffer_size[engine->id] = 0;
540 s->ring_scan_buffer[i] = NULL;
541 s->ring_scan_buffer_size[i] = 0;
542 } 541 }
543} 542}
544 543
@@ -553,9 +552,10 @@ static void reset_execlist(struct intel_vgpu *vgpu,
553 init_vgpu_execlist(vgpu, engine->id); 552 init_vgpu_execlist(vgpu, engine->id);
554} 553}
555 554
556static int init_execlist(struct intel_vgpu *vgpu) 555static int init_execlist(struct intel_vgpu *vgpu,
556 unsigned long engine_mask)
557{ 557{
558 reset_execlist(vgpu, ALL_ENGINES); 558 reset_execlist(vgpu, engine_mask);
559 return 0; 559 return 0;
560} 560}
561 561
diff --git a/drivers/gpu/drm/i915/gvt/gtt.c b/drivers/gpu/drm/i915/gvt/gtt.c
index a529d2bd393c..8d5317d0122d 100644
--- a/drivers/gpu/drm/i915/gvt/gtt.c
+++ b/drivers/gpu/drm/i915/gvt/gtt.c
@@ -997,9 +997,11 @@ static inline void ppgtt_generate_shadow_entry(struct intel_gvt_gtt_entry *se,
997static int ppgtt_populate_shadow_page(struct intel_vgpu_ppgtt_spt *spt) 997static int ppgtt_populate_shadow_page(struct intel_vgpu_ppgtt_spt *spt)
998{ 998{
999 struct intel_vgpu *vgpu = spt->vgpu; 999 struct intel_vgpu *vgpu = spt->vgpu;
1000 struct intel_gvt *gvt = vgpu->gvt;
1001 struct intel_gvt_gtt_pte_ops *ops = gvt->gtt.pte_ops;
1000 struct intel_vgpu_ppgtt_spt *s; 1002 struct intel_vgpu_ppgtt_spt *s;
1001 struct intel_gvt_gtt_entry se, ge; 1003 struct intel_gvt_gtt_entry se, ge;
1002 unsigned long i; 1004 unsigned long gfn, i;
1003 int ret; 1005 int ret;
1004 1006
1005 trace_spt_change(spt->vgpu->id, "born", spt, 1007 trace_spt_change(spt->vgpu->id, "born", spt,
@@ -1007,9 +1009,10 @@ static int ppgtt_populate_shadow_page(struct intel_vgpu_ppgtt_spt *spt)
1007 1009
1008 if (gtt_type_is_pte_pt(spt->shadow_page.type)) { 1010 if (gtt_type_is_pte_pt(spt->shadow_page.type)) {
1009 for_each_present_guest_entry(spt, &ge, i) { 1011 for_each_present_guest_entry(spt, &ge, i) {
1010 ret = gtt_entry_p2m(vgpu, &ge, &se); 1012 gfn = ops->get_pfn(&ge);
1011 if (ret) 1013 if (!intel_gvt_hypervisor_is_valid_gfn(vgpu, gfn) ||
1012 goto fail; 1014 gtt_entry_p2m(vgpu, &ge, &se))
1015 ops->set_pfn(&se, gvt->gtt.scratch_mfn);
1013 ppgtt_set_shadow_entry(spt, &se, i); 1016 ppgtt_set_shadow_entry(spt, &se, i);
1014 } 1017 }
1015 return 0; 1018 return 0;
@@ -1906,7 +1909,7 @@ static int emulate_gtt_mmio_write(struct intel_vgpu *vgpu, unsigned int off,
1906 struct intel_vgpu_mm *ggtt_mm = vgpu->gtt.ggtt_mm; 1909 struct intel_vgpu_mm *ggtt_mm = vgpu->gtt.ggtt_mm;
1907 struct intel_gvt_gtt_pte_ops *ops = gvt->gtt.pte_ops; 1910 struct intel_gvt_gtt_pte_ops *ops = gvt->gtt.pte_ops;
1908 unsigned long g_gtt_index = off >> info->gtt_entry_size_shift; 1911 unsigned long g_gtt_index = off >> info->gtt_entry_size_shift;
1909 unsigned long gma; 1912 unsigned long gma, gfn;
1910 struct intel_gvt_gtt_entry e, m; 1913 struct intel_gvt_gtt_entry e, m;
1911 int ret; 1914 int ret;
1912 1915
@@ -1925,6 +1928,16 @@ static int emulate_gtt_mmio_write(struct intel_vgpu *vgpu, unsigned int off,
1925 bytes); 1928 bytes);
1926 1929
1927 if (ops->test_present(&e)) { 1930 if (ops->test_present(&e)) {
1931 gfn = ops->get_pfn(&e);
1932
1933 /* one PTE update may be issued in multiple writes and the
1934 * first write may not construct a valid gfn
1935 */
1936 if (!intel_gvt_hypervisor_is_valid_gfn(vgpu, gfn)) {
1937 ops->set_pfn(&m, gvt->gtt.scratch_mfn);
1938 goto out;
1939 }
1940
1928 ret = gtt_entry_p2m(vgpu, &e, &m); 1941 ret = gtt_entry_p2m(vgpu, &e, &m);
1929 if (ret) { 1942 if (ret) {
1930 gvt_vgpu_err("fail to translate guest gtt entry\n"); 1943 gvt_vgpu_err("fail to translate guest gtt entry\n");
@@ -1939,6 +1952,7 @@ static int emulate_gtt_mmio_write(struct intel_vgpu *vgpu, unsigned int off,
1939 ops->set_pfn(&m, gvt->gtt.scratch_mfn); 1952 ops->set_pfn(&m, gvt->gtt.scratch_mfn);
1940 } 1953 }
1941 1954
1955out:
1942 ggtt_set_shadow_entry(ggtt_mm, &m, g_gtt_index); 1956 ggtt_set_shadow_entry(ggtt_mm, &m, g_gtt_index);
1943 gtt_invalidate(gvt->dev_priv); 1957 gtt_invalidate(gvt->dev_priv);
1944 ggtt_set_guest_entry(ggtt_mm, &e, g_gtt_index); 1958 ggtt_set_guest_entry(ggtt_mm, &e, g_gtt_index);
diff --git a/drivers/gpu/drm/i915/gvt/gvt.h b/drivers/gpu/drm/i915/gvt/gvt.h
index 7dc7a80213a8..c6197d990818 100644
--- a/drivers/gpu/drm/i915/gvt/gvt.h
+++ b/drivers/gpu/drm/i915/gvt/gvt.h
@@ -82,7 +82,6 @@ struct intel_gvt_device_info {
82struct intel_vgpu_gm { 82struct intel_vgpu_gm {
83 u64 aperture_sz; 83 u64 aperture_sz;
84 u64 hidden_sz; 84 u64 hidden_sz;
85 void *aperture_va;
86 struct drm_mm_node low_gm_node; 85 struct drm_mm_node low_gm_node;
87 struct drm_mm_node high_gm_node; 86 struct drm_mm_node high_gm_node;
88}; 87};
@@ -127,7 +126,6 @@ struct intel_vgpu_irq {
127struct intel_vgpu_opregion { 126struct intel_vgpu_opregion {
128 bool mapped; 127 bool mapped;
129 void *va; 128 void *va;
130 void *va_gopregion;
131 u32 gfn[INTEL_GVT_OPREGION_PAGES]; 129 u32 gfn[INTEL_GVT_OPREGION_PAGES];
132}; 130};
133 131
@@ -152,8 +150,8 @@ enum {
152 150
153struct intel_vgpu_submission_ops { 151struct intel_vgpu_submission_ops {
154 const char *name; 152 const char *name;
155 int (*init)(struct intel_vgpu *vgpu); 153 int (*init)(struct intel_vgpu *vgpu, unsigned long engine_mask);
156 void (*clean)(struct intel_vgpu *vgpu); 154 void (*clean)(struct intel_vgpu *vgpu, unsigned long engine_mask);
157 void (*reset)(struct intel_vgpu *vgpu, unsigned long engine_mask); 155 void (*reset)(struct intel_vgpu *vgpu, unsigned long engine_mask);
158}; 156};
159 157
diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c
index 92d6468daeee..9be639aa3b55 100644
--- a/drivers/gpu/drm/i915/gvt/handlers.c
+++ b/drivers/gpu/drm/i915/gvt/handlers.c
@@ -1494,7 +1494,6 @@ static int elsp_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
1494static int ring_mode_mmio_write(struct intel_vgpu *vgpu, unsigned int offset, 1494static int ring_mode_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
1495 void *p_data, unsigned int bytes) 1495 void *p_data, unsigned int bytes)
1496{ 1496{
1497 struct intel_vgpu_submission *s = &vgpu->submission;
1498 u32 data = *(u32 *)p_data; 1497 u32 data = *(u32 *)p_data;
1499 int ring_id = intel_gvt_render_mmio_to_ring_id(vgpu->gvt, offset); 1498 int ring_id = intel_gvt_render_mmio_to_ring_id(vgpu->gvt, offset);
1500 bool enable_execlist; 1499 bool enable_execlist;
@@ -1523,11 +1522,9 @@ static int ring_mode_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
1523 if (!enable_execlist) 1522 if (!enable_execlist)
1524 return 0; 1523 return 0;
1525 1524
1526 if (s->active)
1527 return 0;
1528
1529 ret = intel_vgpu_select_submission_ops(vgpu, 1525 ret = intel_vgpu_select_submission_ops(vgpu,
1530 INTEL_VGPU_EXECLIST_SUBMISSION); 1526 ENGINE_MASK(ring_id),
1527 INTEL_VGPU_EXECLIST_SUBMISSION);
1531 if (ret) 1528 if (ret)
1532 return ret; 1529 return ret;
1533 1530
@@ -2843,6 +2840,9 @@ static int init_skl_mmio_info(struct intel_gvt *gvt)
2843 MMIO_D(_MMIO(_PLANE_KEYVAL_1(PIPE_A)), D_SKL_PLUS); 2840 MMIO_D(_MMIO(_PLANE_KEYVAL_1(PIPE_A)), D_SKL_PLUS);
2844 MMIO_D(_MMIO(_PLANE_KEYVAL_1(PIPE_B)), D_SKL_PLUS); 2841 MMIO_D(_MMIO(_PLANE_KEYVAL_1(PIPE_B)), D_SKL_PLUS);
2845 MMIO_D(_MMIO(_PLANE_KEYVAL_1(PIPE_C)), D_SKL_PLUS); 2842 MMIO_D(_MMIO(_PLANE_KEYVAL_1(PIPE_C)), D_SKL_PLUS);
2843 MMIO_D(_MMIO(_PLANE_KEYMAX_1(PIPE_A)), D_SKL_PLUS);
2844 MMIO_D(_MMIO(_PLANE_KEYMAX_1(PIPE_B)), D_SKL_PLUS);
2845 MMIO_D(_MMIO(_PLANE_KEYMAX_1(PIPE_C)), D_SKL_PLUS);
2846 MMIO_D(_MMIO(_PLANE_KEYMSK_1(PIPE_A)), D_SKL_PLUS); 2846 MMIO_D(_MMIO(_PLANE_KEYMSK_1(PIPE_A)), D_SKL_PLUS);
2847 MMIO_D(_MMIO(_PLANE_KEYMSK_1(PIPE_B)), D_SKL_PLUS); 2847 MMIO_D(_MMIO(_PLANE_KEYMSK_1(PIPE_B)), D_SKL_PLUS);
2848 MMIO_D(_MMIO(_PLANE_KEYMSK_1(PIPE_C)), D_SKL_PLUS); 2848 MMIO_D(_MMIO(_PLANE_KEYMSK_1(PIPE_C)), D_SKL_PLUS);
diff --git a/drivers/gpu/drm/i915/gvt/hypercall.h b/drivers/gpu/drm/i915/gvt/hypercall.h
index a1bd82feb827..f8e77e166246 100644
--- a/drivers/gpu/drm/i915/gvt/hypercall.h
+++ b/drivers/gpu/drm/i915/gvt/hypercall.h
@@ -58,6 +58,7 @@ struct intel_gvt_mpt {
58 int (*set_opregion)(void *vgpu); 58 int (*set_opregion)(void *vgpu);
59 int (*get_vfio_device)(void *vgpu); 59 int (*get_vfio_device)(void *vgpu);
60 void (*put_vfio_device)(void *vgpu); 60 void (*put_vfio_device)(void *vgpu);
61 bool (*is_valid_gfn)(unsigned long handle, unsigned long gfn);
61}; 62};
62 63
63extern struct intel_gvt_mpt xengt_mpt; 64extern struct intel_gvt_mpt xengt_mpt;
diff --git a/drivers/gpu/drm/i915/gvt/kvmgt.c b/drivers/gpu/drm/i915/gvt/kvmgt.c
index 554d1db1f3c8..909499b73d03 100644
--- a/drivers/gpu/drm/i915/gvt/kvmgt.c
+++ b/drivers/gpu/drm/i915/gvt/kvmgt.c
@@ -651,6 +651,39 @@ static int intel_vgpu_bar_rw(struct intel_vgpu *vgpu, int bar, uint64_t off,
651 return ret; 651 return ret;
652} 652}
653 653
654static inline bool intel_vgpu_in_aperture(struct intel_vgpu *vgpu, uint64_t off)
655{
656 return off >= vgpu_aperture_offset(vgpu) &&
657 off < vgpu_aperture_offset(vgpu) + vgpu_aperture_sz(vgpu);
658}
659
660static int intel_vgpu_aperture_rw(struct intel_vgpu *vgpu, uint64_t off,
661 void *buf, unsigned long count, bool is_write)
662{
663 void *aperture_va;
664
665 if (!intel_vgpu_in_aperture(vgpu, off) ||
666 !intel_vgpu_in_aperture(vgpu, off + count)) {
667 gvt_vgpu_err("Invalid aperture offset %llu\n", off);
668 return -EINVAL;
669 }
670
671 aperture_va = io_mapping_map_wc(&vgpu->gvt->dev_priv->ggtt.iomap,
672 ALIGN_DOWN(off, PAGE_SIZE),
673 count + offset_in_page(off));
674 if (!aperture_va)
675 return -EIO;
676
677 if (is_write)
678 memcpy(aperture_va + offset_in_page(off), buf, count);
679 else
680 memcpy(buf, aperture_va + offset_in_page(off), count);
681
682 io_mapping_unmap(aperture_va);
683
684 return 0;
685}
686
654static ssize_t intel_vgpu_rw(struct mdev_device *mdev, char *buf, 687static ssize_t intel_vgpu_rw(struct mdev_device *mdev, char *buf,
655 size_t count, loff_t *ppos, bool is_write) 688 size_t count, loff_t *ppos, bool is_write)
656{ 689{
@@ -679,8 +712,7 @@ static ssize_t intel_vgpu_rw(struct mdev_device *mdev, char *buf,
679 buf, count, is_write); 712 buf, count, is_write);
680 break; 713 break;
681 case VFIO_PCI_BAR2_REGION_INDEX: 714 case VFIO_PCI_BAR2_REGION_INDEX:
682 ret = intel_vgpu_bar_rw(vgpu, PCI_BASE_ADDRESS_2, pos, 715 ret = intel_vgpu_aperture_rw(vgpu, pos, buf, count, is_write);
683 buf, count, is_write);
684 break; 716 break;
685 case VFIO_PCI_BAR1_REGION_INDEX: 717 case VFIO_PCI_BAR1_REGION_INDEX:
686 case VFIO_PCI_BAR3_REGION_INDEX: 718 case VFIO_PCI_BAR3_REGION_INDEX:
@@ -1575,6 +1607,21 @@ static unsigned long kvmgt_virt_to_pfn(void *addr)
1575 return PFN_DOWN(__pa(addr)); 1607 return PFN_DOWN(__pa(addr));
1576} 1608}
1577 1609
1610static bool kvmgt_is_valid_gfn(unsigned long handle, unsigned long gfn)
1611{
1612 struct kvmgt_guest_info *info;
1613 struct kvm *kvm;
1614
1615 if (!handle_valid(handle))
1616 return false;
1617
1618 info = (struct kvmgt_guest_info *)handle;
1619 kvm = info->kvm;
1620
1621 return kvm_is_visible_gfn(kvm, gfn);
1622
1623}
1624
1578struct intel_gvt_mpt kvmgt_mpt = { 1625struct intel_gvt_mpt kvmgt_mpt = {
1579 .host_init = kvmgt_host_init, 1626 .host_init = kvmgt_host_init,
1580 .host_exit = kvmgt_host_exit, 1627 .host_exit = kvmgt_host_exit,
@@ -1590,6 +1637,7 @@ struct intel_gvt_mpt kvmgt_mpt = {
1590 .set_opregion = kvmgt_set_opregion, 1637 .set_opregion = kvmgt_set_opregion,
1591 .get_vfio_device = kvmgt_get_vfio_device, 1638 .get_vfio_device = kvmgt_get_vfio_device,
1592 .put_vfio_device = kvmgt_put_vfio_device, 1639 .put_vfio_device = kvmgt_put_vfio_device,
1640 .is_valid_gfn = kvmgt_is_valid_gfn,
1593}; 1641};
1594EXPORT_SYMBOL_GPL(kvmgt_mpt); 1642EXPORT_SYMBOL_GPL(kvmgt_mpt);
1595 1643
diff --git a/drivers/gpu/drm/i915/gvt/mmio.c b/drivers/gpu/drm/i915/gvt/mmio.c
index 562b5ad857a4..5c869e3fdf3b 100644
--- a/drivers/gpu/drm/i915/gvt/mmio.c
+++ b/drivers/gpu/drm/i915/gvt/mmio.c
@@ -56,38 +56,6 @@ int intel_vgpu_gpa_to_mmio_offset(struct intel_vgpu *vgpu, u64 gpa)
56 (reg >= gvt->device_info.gtt_start_offset \ 56 (reg >= gvt->device_info.gtt_start_offset \
57 && reg < gvt->device_info.gtt_start_offset + gvt_ggtt_sz(gvt)) 57 && reg < gvt->device_info.gtt_start_offset + gvt_ggtt_sz(gvt))
58 58
59static bool vgpu_gpa_is_aperture(struct intel_vgpu *vgpu, uint64_t gpa)
60{
61 u64 aperture_gpa = intel_vgpu_get_bar_gpa(vgpu, PCI_BASE_ADDRESS_2);
62 u64 aperture_sz = vgpu_aperture_sz(vgpu);
63
64 return gpa >= aperture_gpa && gpa < aperture_gpa + aperture_sz;
65}
66
67static int vgpu_aperture_rw(struct intel_vgpu *vgpu, uint64_t gpa,
68 void *pdata, unsigned int size, bool is_read)
69{
70 u64 aperture_gpa = intel_vgpu_get_bar_gpa(vgpu, PCI_BASE_ADDRESS_2);
71 u64 offset = gpa - aperture_gpa;
72
73 if (!vgpu_gpa_is_aperture(vgpu, gpa + size - 1)) {
74 gvt_vgpu_err("Aperture rw out of range, offset %llx, size %d\n",
75 offset, size);
76 return -EINVAL;
77 }
78
79 if (!vgpu->gm.aperture_va) {
80 gvt_vgpu_err("BAR is not enabled\n");
81 return -ENXIO;
82 }
83
84 if (is_read)
85 memcpy(pdata, vgpu->gm.aperture_va + offset, size);
86 else
87 memcpy(vgpu->gm.aperture_va + offset, pdata, size);
88 return 0;
89}
90
91static void failsafe_emulate_mmio_rw(struct intel_vgpu *vgpu, uint64_t pa, 59static void failsafe_emulate_mmio_rw(struct intel_vgpu *vgpu, uint64_t pa,
92 void *p_data, unsigned int bytes, bool read) 60 void *p_data, unsigned int bytes, bool read)
93{ 61{
@@ -144,11 +112,6 @@ int intel_vgpu_emulate_mmio_read(struct intel_vgpu *vgpu, uint64_t pa,
144 } 112 }
145 mutex_lock(&gvt->lock); 113 mutex_lock(&gvt->lock);
146 114
147 if (vgpu_gpa_is_aperture(vgpu, pa)) {
148 ret = vgpu_aperture_rw(vgpu, pa, p_data, bytes, true);
149 goto out;
150 }
151
152 offset = intel_vgpu_gpa_to_mmio_offset(vgpu, pa); 115 offset = intel_vgpu_gpa_to_mmio_offset(vgpu, pa);
153 116
154 if (WARN_ON(bytes > 8)) 117 if (WARN_ON(bytes > 8))
@@ -222,11 +185,6 @@ int intel_vgpu_emulate_mmio_write(struct intel_vgpu *vgpu, uint64_t pa,
222 185
223 mutex_lock(&gvt->lock); 186 mutex_lock(&gvt->lock);
224 187
225 if (vgpu_gpa_is_aperture(vgpu, pa)) {
226 ret = vgpu_aperture_rw(vgpu, pa, p_data, bytes, false);
227 goto out;
228 }
229
230 offset = intel_vgpu_gpa_to_mmio_offset(vgpu, pa); 188 offset = intel_vgpu_gpa_to_mmio_offset(vgpu, pa);
231 189
232 if (WARN_ON(bytes > 8)) 190 if (WARN_ON(bytes > 8))
diff --git a/drivers/gpu/drm/i915/gvt/mmio_context.c b/drivers/gpu/drm/i915/gvt/mmio_context.c
index 74834395dd89..73ad6e90e49d 100644
--- a/drivers/gpu/drm/i915/gvt/mmio_context.c
+++ b/drivers/gpu/drm/i915/gvt/mmio_context.c
@@ -80,7 +80,7 @@ static struct engine_mmio gen8_engine_mmio_list[] __cacheline_aligned = {
80 {BCS, RING_INSTPM(BLT_RING_BASE), 0xffff, false}, /* 0x220c0 */ 80 {BCS, RING_INSTPM(BLT_RING_BASE), 0xffff, false}, /* 0x220c0 */
81 {BCS, RING_HWSTAM(BLT_RING_BASE), 0x0, false}, /* 0x22098 */ 81 {BCS, RING_HWSTAM(BLT_RING_BASE), 0x0, false}, /* 0x22098 */
82 {BCS, RING_EXCC(BLT_RING_BASE), 0x0, false}, /* 0x22028 */ 82 {BCS, RING_EXCC(BLT_RING_BASE), 0x0, false}, /* 0x22028 */
83 { /* Terminated */ } 83 {RCS, INVALID_MMIO_REG, 0, false } /* Terminated */
84}; 84};
85 85
86static struct engine_mmio gen9_engine_mmio_list[] __cacheline_aligned = { 86static struct engine_mmio gen9_engine_mmio_list[] __cacheline_aligned = {
@@ -146,7 +146,7 @@ static struct engine_mmio gen9_engine_mmio_list[] __cacheline_aligned = {
146 {RCS, GEN8_GARBCNTL, 0x0, false}, /* 0xb004 */ 146 {RCS, GEN8_GARBCNTL, 0x0, false}, /* 0xb004 */
147 {RCS, GEN7_FF_THREAD_MODE, 0x0, false}, /* 0x20a0 */ 147 {RCS, GEN7_FF_THREAD_MODE, 0x0, false}, /* 0x20a0 */
148 {RCS, FF_SLICE_CS_CHICKEN2, 0xffff, false}, /* 0x20e4 */ 148 {RCS, FF_SLICE_CS_CHICKEN2, 0xffff, false}, /* 0x20e4 */
149 { /* Terminated */ } 149 {RCS, INVALID_MMIO_REG, 0, false } /* Terminated */
150}; 150};
151 151
152static struct { 152static struct {
@@ -167,7 +167,7 @@ static void load_render_mocs(struct drm_i915_private *dev_priv)
167 }; 167 };
168 int ring_id, i; 168 int ring_id, i;
169 169
170 for (ring_id = 0; ring_id < I915_NUM_ENGINES; ring_id++) { 170 for (ring_id = 0; ring_id < ARRAY_SIZE(regs); ring_id++) {
171 offset.reg = regs[ring_id]; 171 offset.reg = regs[ring_id];
172 for (i = 0; i < 64; i++) { 172 for (i = 0; i < 64; i++) {
173 gen9_render_mocs.control_table[ring_id][i] = 173 gen9_render_mocs.control_table[ring_id][i] =
@@ -310,8 +310,8 @@ static void switch_mmio(struct intel_vgpu *pre,
310 if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) 310 if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
311 switch_mocs(pre, next, ring_id); 311 switch_mocs(pre, next, ring_id);
312 312
313 mmio = dev_priv->gvt->engine_mmio_list; 313 for (mmio = dev_priv->gvt->engine_mmio_list;
314 while (i915_mmio_reg_offset((mmio++)->reg)) { 314 i915_mmio_reg_valid(mmio->reg); mmio++) {
315 if (mmio->ring_id != ring_id) 315 if (mmio->ring_id != ring_id)
316 continue; 316 continue;
317 // save 317 // save
diff --git a/drivers/gpu/drm/i915/gvt/mpt.h b/drivers/gpu/drm/i915/gvt/mpt.h
index ca8005a6d5fa..81aff4eacbfe 100644
--- a/drivers/gpu/drm/i915/gvt/mpt.h
+++ b/drivers/gpu/drm/i915/gvt/mpt.h
@@ -339,4 +339,21 @@ static inline void intel_gvt_hypervisor_put_vfio_device(struct intel_vgpu *vgpu)
339 intel_gvt_host.mpt->put_vfio_device(vgpu); 339 intel_gvt_host.mpt->put_vfio_device(vgpu);
340} 340}
341 341
342/**
343 * intel_gvt_hypervisor_is_valid_gfn - check if a visible gfn
344 * @vgpu: a vGPU
345 * @gfn: guest PFN
346 *
347 * Returns:
348 * true on valid gfn, false on not.
349 */
350static inline bool intel_gvt_hypervisor_is_valid_gfn(
351 struct intel_vgpu *vgpu, unsigned long gfn)
352{
353 if (!intel_gvt_host.mpt->is_valid_gfn)
354 return true;
355
356 return intel_gvt_host.mpt->is_valid_gfn(vgpu->handle, gfn);
357}
358
342#endif /* _GVT_MPT_H_ */ 359#endif /* _GVT_MPT_H_ */
diff --git a/drivers/gpu/drm/i915/gvt/opregion.c b/drivers/gpu/drm/i915/gvt/opregion.c
index 8420d1fc3ddb..fa75a2eead90 100644
--- a/drivers/gpu/drm/i915/gvt/opregion.c
+++ b/drivers/gpu/drm/i915/gvt/opregion.c
@@ -299,21 +299,13 @@ int intel_vgpu_opregion_base_write_handler(struct intel_vgpu *vgpu, u32 gpa)
299{ 299{
300 300
301 int i, ret = 0; 301 int i, ret = 0;
302 unsigned long pfn;
303 302
304 gvt_dbg_core("emulate opregion from kernel\n"); 303 gvt_dbg_core("emulate opregion from kernel\n");
305 304
306 switch (intel_gvt_host.hypervisor_type) { 305 switch (intel_gvt_host.hypervisor_type) {
307 case INTEL_GVT_HYPERVISOR_KVM: 306 case INTEL_GVT_HYPERVISOR_KVM:
308 pfn = intel_gvt_hypervisor_gfn_to_mfn(vgpu, gpa >> PAGE_SHIFT); 307 for (i = 0; i < INTEL_GVT_OPREGION_PAGES; i++)
309 vgpu_opregion(vgpu)->va_gopregion = memremap(pfn << PAGE_SHIFT, 308 vgpu_opregion(vgpu)->gfn[i] = (gpa >> PAGE_SHIFT) + i;
310 INTEL_GVT_OPREGION_SIZE,
311 MEMREMAP_WB);
312 if (!vgpu_opregion(vgpu)->va_gopregion) {
313 gvt_vgpu_err("failed to map guest opregion\n");
314 ret = -EFAULT;
315 }
316 vgpu_opregion(vgpu)->mapped = true;
317 break; 309 break;
318 case INTEL_GVT_HYPERVISOR_XEN: 310 case INTEL_GVT_HYPERVISOR_XEN:
319 /** 311 /**
@@ -352,10 +344,7 @@ void intel_vgpu_clean_opregion(struct intel_vgpu *vgpu)
352 if (vgpu_opregion(vgpu)->mapped) 344 if (vgpu_opregion(vgpu)->mapped)
353 map_vgpu_opregion(vgpu, false); 345 map_vgpu_opregion(vgpu, false);
354 } else if (intel_gvt_host.hypervisor_type == INTEL_GVT_HYPERVISOR_KVM) { 346 } else if (intel_gvt_host.hypervisor_type == INTEL_GVT_HYPERVISOR_KVM) {
355 if (vgpu_opregion(vgpu)->mapped) { 347 /* Guest opregion is released by VFIO */
356 memunmap(vgpu_opregion(vgpu)->va_gopregion);
357 vgpu_opregion(vgpu)->va_gopregion = NULL;
358 }
359 } 348 }
360 free_pages((unsigned long)vgpu_opregion(vgpu)->va, 349 free_pages((unsigned long)vgpu_opregion(vgpu)->va,
361 get_order(INTEL_GVT_OPREGION_SIZE)); 350 get_order(INTEL_GVT_OPREGION_SIZE));
@@ -480,19 +469,40 @@ static bool querying_capabilities(u32 scic)
480 */ 469 */
481int intel_vgpu_emulate_opregion_request(struct intel_vgpu *vgpu, u32 swsci) 470int intel_vgpu_emulate_opregion_request(struct intel_vgpu *vgpu, u32 swsci)
482{ 471{
483 u32 *scic, *parm; 472 u32 scic, parm;
484 u32 func, subfunc; 473 u32 func, subfunc;
474 u64 scic_pa = 0, parm_pa = 0;
475 int ret;
485 476
486 switch (intel_gvt_host.hypervisor_type) { 477 switch (intel_gvt_host.hypervisor_type) {
487 case INTEL_GVT_HYPERVISOR_XEN: 478 case INTEL_GVT_HYPERVISOR_XEN:
488 scic = vgpu_opregion(vgpu)->va + INTEL_GVT_OPREGION_SCIC; 479 scic = *((u32 *)vgpu_opregion(vgpu)->va +
489 parm = vgpu_opregion(vgpu)->va + INTEL_GVT_OPREGION_PARM; 480 INTEL_GVT_OPREGION_SCIC);
481 parm = *((u32 *)vgpu_opregion(vgpu)->va +
482 INTEL_GVT_OPREGION_PARM);
490 break; 483 break;
491 case INTEL_GVT_HYPERVISOR_KVM: 484 case INTEL_GVT_HYPERVISOR_KVM:
492 scic = vgpu_opregion(vgpu)->va_gopregion + 485 scic_pa = (vgpu_opregion(vgpu)->gfn[0] << PAGE_SHIFT) +
493 INTEL_GVT_OPREGION_SCIC; 486 INTEL_GVT_OPREGION_SCIC;
494 parm = vgpu_opregion(vgpu)->va_gopregion + 487 parm_pa = (vgpu_opregion(vgpu)->gfn[0] << PAGE_SHIFT) +
495 INTEL_GVT_OPREGION_PARM; 488 INTEL_GVT_OPREGION_PARM;
489
490 ret = intel_gvt_hypervisor_read_gpa(vgpu, scic_pa,
491 &scic, sizeof(scic));
492 if (ret) {
493 gvt_vgpu_err("guest opregion read error %d, gpa 0x%llx, len %lu\n",
494 ret, scic_pa, sizeof(scic));
495 return ret;
496 }
497
498 ret = intel_gvt_hypervisor_read_gpa(vgpu, parm_pa,
499 &parm, sizeof(parm));
500 if (ret) {
501 gvt_vgpu_err("guest opregion read error %d, gpa 0x%llx, len %lu\n",
502 ret, scic_pa, sizeof(scic));
503 return ret;
504 }
505
496 break; 506 break;
497 default: 507 default:
498 gvt_vgpu_err("not supported hypervisor\n"); 508 gvt_vgpu_err("not supported hypervisor\n");
@@ -510,9 +520,9 @@ int intel_vgpu_emulate_opregion_request(struct intel_vgpu *vgpu, u32 swsci)
510 return 0; 520 return 0;
511 } 521 }
512 522
513 func = GVT_OPREGION_FUNC(*scic); 523 func = GVT_OPREGION_FUNC(scic);
514 subfunc = GVT_OPREGION_SUBFUNC(*scic); 524 subfunc = GVT_OPREGION_SUBFUNC(scic);
515 if (!querying_capabilities(*scic)) { 525 if (!querying_capabilities(scic)) {
516 gvt_vgpu_err("requesting runtime service: func \"%s\"," 526 gvt_vgpu_err("requesting runtime service: func \"%s\","
517 " subfunc \"%s\"\n", 527 " subfunc \"%s\"\n",
518 opregion_func_name(func), 528 opregion_func_name(func),
@@ -521,11 +531,43 @@ int intel_vgpu_emulate_opregion_request(struct intel_vgpu *vgpu, u32 swsci)
521 * emulate exit status of function call, '0' means 531 * emulate exit status of function call, '0' means
522 * "failure, generic, unsupported or unknown cause" 532 * "failure, generic, unsupported or unknown cause"
523 */ 533 */
524 *scic &= ~OPREGION_SCIC_EXIT_MASK; 534 scic &= ~OPREGION_SCIC_EXIT_MASK;
525 return 0; 535 goto out;
536 }
537
538 scic = 0;
539 parm = 0;
540
541out:
542 switch (intel_gvt_host.hypervisor_type) {
543 case INTEL_GVT_HYPERVISOR_XEN:
544 *((u32 *)vgpu_opregion(vgpu)->va +
545 INTEL_GVT_OPREGION_SCIC) = scic;
546 *((u32 *)vgpu_opregion(vgpu)->va +
547 INTEL_GVT_OPREGION_PARM) = parm;
548 break;
549 case INTEL_GVT_HYPERVISOR_KVM:
550 ret = intel_gvt_hypervisor_write_gpa(vgpu, scic_pa,
551 &scic, sizeof(scic));
552 if (ret) {
553 gvt_vgpu_err("guest opregion write error %d, gpa 0x%llx, len %lu\n",
554 ret, scic_pa, sizeof(scic));
555 return ret;
556 }
557
558 ret = intel_gvt_hypervisor_write_gpa(vgpu, parm_pa,
559 &parm, sizeof(parm));
560 if (ret) {
561 gvt_vgpu_err("guest opregion write error %d, gpa 0x%llx, len %lu\n",
562 ret, scic_pa, sizeof(scic));
563 return ret;
564 }
565
566 break;
567 default:
568 gvt_vgpu_err("not supported hypervisor\n");
569 return -EINVAL;
526 } 570 }
527 571
528 *scic = 0;
529 *parm = 0;
530 return 0; 572 return 0;
531} 573}
diff --git a/drivers/gpu/drm/i915/gvt/sched_policy.c b/drivers/gpu/drm/i915/gvt/sched_policy.c
index eea1a2f92099..cc1ce361cd76 100644
--- a/drivers/gpu/drm/i915/gvt/sched_policy.c
+++ b/drivers/gpu/drm/i915/gvt/sched_policy.c
@@ -50,6 +50,7 @@ static bool vgpu_has_pending_workload(struct intel_vgpu *vgpu)
50struct vgpu_sched_data { 50struct vgpu_sched_data {
51 struct list_head lru_list; 51 struct list_head lru_list;
52 struct intel_vgpu *vgpu; 52 struct intel_vgpu *vgpu;
53 bool active;
53 54
54 ktime_t sched_in_time; 55 ktime_t sched_in_time;
55 ktime_t sched_out_time; 56 ktime_t sched_out_time;
@@ -308,8 +309,15 @@ static int tbs_sched_init_vgpu(struct intel_vgpu *vgpu)
308 309
309static void tbs_sched_clean_vgpu(struct intel_vgpu *vgpu) 310static void tbs_sched_clean_vgpu(struct intel_vgpu *vgpu)
310{ 311{
312 struct intel_gvt *gvt = vgpu->gvt;
313 struct gvt_sched_data *sched_data = gvt->scheduler.sched_data;
314
311 kfree(vgpu->sched_data); 315 kfree(vgpu->sched_data);
312 vgpu->sched_data = NULL; 316 vgpu->sched_data = NULL;
317
318 /* this vgpu id has been removed */
319 if (idr_is_empty(&gvt->vgpu_idr))
320 hrtimer_cancel(&sched_data->timer);
313} 321}
314 322
315static void tbs_sched_start_schedule(struct intel_vgpu *vgpu) 323static void tbs_sched_start_schedule(struct intel_vgpu *vgpu)
@@ -325,6 +333,7 @@ static void tbs_sched_start_schedule(struct intel_vgpu *vgpu)
325 if (!hrtimer_active(&sched_data->timer)) 333 if (!hrtimer_active(&sched_data->timer))
326 hrtimer_start(&sched_data->timer, ktime_add_ns(ktime_get(), 334 hrtimer_start(&sched_data->timer, ktime_add_ns(ktime_get(),
327 sched_data->period), HRTIMER_MODE_ABS); 335 sched_data->period), HRTIMER_MODE_ABS);
336 vgpu_data->active = true;
328} 337}
329 338
330static void tbs_sched_stop_schedule(struct intel_vgpu *vgpu) 339static void tbs_sched_stop_schedule(struct intel_vgpu *vgpu)
@@ -332,6 +341,7 @@ static void tbs_sched_stop_schedule(struct intel_vgpu *vgpu)
332 struct vgpu_sched_data *vgpu_data = vgpu->sched_data; 341 struct vgpu_sched_data *vgpu_data = vgpu->sched_data;
333 342
334 list_del_init(&vgpu_data->lru_list); 343 list_del_init(&vgpu_data->lru_list);
344 vgpu_data->active = false;
335} 345}
336 346
337static struct intel_gvt_sched_policy_ops tbs_schedule_ops = { 347static struct intel_gvt_sched_policy_ops tbs_schedule_ops = {
@@ -367,9 +377,12 @@ void intel_vgpu_clean_sched_policy(struct intel_vgpu *vgpu)
367 377
368void intel_vgpu_start_schedule(struct intel_vgpu *vgpu) 378void intel_vgpu_start_schedule(struct intel_vgpu *vgpu)
369{ 379{
370 gvt_dbg_core("vgpu%d: start schedule\n", vgpu->id); 380 struct vgpu_sched_data *vgpu_data = vgpu->sched_data;
371 381
372 vgpu->gvt->scheduler.sched_ops->start_schedule(vgpu); 382 if (!vgpu_data->active) {
383 gvt_dbg_core("vgpu%d: start schedule\n", vgpu->id);
384 vgpu->gvt->scheduler.sched_ops->start_schedule(vgpu);
385 }
373} 386}
374 387
375void intel_gvt_kick_schedule(struct intel_gvt *gvt) 388void intel_gvt_kick_schedule(struct intel_gvt *gvt)
@@ -382,6 +395,10 @@ void intel_vgpu_stop_schedule(struct intel_vgpu *vgpu)
382 struct intel_gvt_workload_scheduler *scheduler = 395 struct intel_gvt_workload_scheduler *scheduler =
383 &vgpu->gvt->scheduler; 396 &vgpu->gvt->scheduler;
384 int ring_id; 397 int ring_id;
398 struct vgpu_sched_data *vgpu_data = vgpu->sched_data;
399
400 if (!vgpu_data->active)
401 return;
385 402
386 gvt_dbg_core("vgpu%d: stop schedule\n", vgpu->id); 403 gvt_dbg_core("vgpu%d: stop schedule\n", vgpu->id);
387 404
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c
index 0056638b0c16..b55b3580ca1d 100644
--- a/drivers/gpu/drm/i915/gvt/scheduler.c
+++ b/drivers/gpu/drm/i915/gvt/scheduler.c
@@ -991,7 +991,7 @@ void intel_vgpu_clean_submission(struct intel_vgpu *vgpu)
991{ 991{
992 struct intel_vgpu_submission *s = &vgpu->submission; 992 struct intel_vgpu_submission *s = &vgpu->submission;
993 993
994 intel_vgpu_select_submission_ops(vgpu, 0); 994 intel_vgpu_select_submission_ops(vgpu, ALL_ENGINES, 0);
995 i915_gem_context_put(s->shadow_ctx); 995 i915_gem_context_put(s->shadow_ctx);
996 kmem_cache_destroy(s->workloads); 996 kmem_cache_destroy(s->workloads);
997} 997}
@@ -1079,6 +1079,7 @@ out_shadow_ctx:
1079 * 1079 *
1080 */ 1080 */
1081int intel_vgpu_select_submission_ops(struct intel_vgpu *vgpu, 1081int intel_vgpu_select_submission_ops(struct intel_vgpu *vgpu,
1082 unsigned long engine_mask,
1082 unsigned int interface) 1083 unsigned int interface)
1083{ 1084{
1084 struct intel_vgpu_submission *s = &vgpu->submission; 1085 struct intel_vgpu_submission *s = &vgpu->submission;
@@ -1091,21 +1092,21 @@ int intel_vgpu_select_submission_ops(struct intel_vgpu *vgpu,
1091 if (WARN_ON(interface >= ARRAY_SIZE(ops))) 1092 if (WARN_ON(interface >= ARRAY_SIZE(ops)))
1092 return -EINVAL; 1093 return -EINVAL;
1093 1094
1094 if (s->active) { 1095 if (WARN_ON(interface == 0 && engine_mask != ALL_ENGINES))
1095 s->ops->clean(vgpu); 1096 return -EINVAL;
1096 s->active = false; 1097
1097 gvt_dbg_core("vgpu%d: de-select ops [ %s ] \n", 1098 if (s->active)
1098 vgpu->id, s->ops->name); 1099 s->ops->clean(vgpu, engine_mask);
1099 }
1100 1100
1101 if (interface == 0) { 1101 if (interface == 0) {
1102 s->ops = NULL; 1102 s->ops = NULL;
1103 s->virtual_submission_interface = 0; 1103 s->virtual_submission_interface = 0;
1104 gvt_dbg_core("vgpu%d: no submission ops\n", vgpu->id); 1104 s->active = false;
1105 gvt_dbg_core("vgpu%d: remove submission ops\n", vgpu->id);
1105 return 0; 1106 return 0;
1106 } 1107 }
1107 1108
1108 ret = ops[interface]->init(vgpu); 1109 ret = ops[interface]->init(vgpu, engine_mask);
1109 if (ret) 1110 if (ret)
1110 return ret; 1111 return ret;
1111 1112
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.h b/drivers/gpu/drm/i915/gvt/scheduler.h
index 3de77dfa7c59..ff175a98b19e 100644
--- a/drivers/gpu/drm/i915/gvt/scheduler.h
+++ b/drivers/gpu/drm/i915/gvt/scheduler.h
@@ -141,6 +141,7 @@ void intel_vgpu_reset_submission(struct intel_vgpu *vgpu,
141void intel_vgpu_clean_submission(struct intel_vgpu *vgpu); 141void intel_vgpu_clean_submission(struct intel_vgpu *vgpu);
142 142
143int intel_vgpu_select_submission_ops(struct intel_vgpu *vgpu, 143int intel_vgpu_select_submission_ops(struct intel_vgpu *vgpu,
144 unsigned long engine_mask,
144 unsigned int interface); 145 unsigned int interface);
145 146
146extern const struct intel_vgpu_submission_ops 147extern const struct intel_vgpu_submission_ops
diff --git a/drivers/gpu/drm/i915/gvt/vgpu.c b/drivers/gpu/drm/i915/gvt/vgpu.c
index 4688619f6a1c..b87b19d8443c 100644
--- a/drivers/gpu/drm/i915/gvt/vgpu.c
+++ b/drivers/gpu/drm/i915/gvt/vgpu.c
@@ -258,6 +258,8 @@ void intel_gvt_destroy_vgpu(struct intel_vgpu *vgpu)
258 258
259 intel_gvt_debugfs_remove_vgpu(vgpu); 259 intel_gvt_debugfs_remove_vgpu(vgpu);
260 idr_remove(&gvt->vgpu_idr, vgpu->id); 260 idr_remove(&gvt->vgpu_idr, vgpu->id);
261 if (idr_is_empty(&gvt->vgpu_idr))
262 intel_gvt_clean_irq(gvt);
261 intel_vgpu_clean_sched_policy(vgpu); 263 intel_vgpu_clean_sched_policy(vgpu);
262 intel_vgpu_clean_submission(vgpu); 264 intel_vgpu_clean_submission(vgpu);
263 intel_vgpu_clean_display(vgpu); 265 intel_vgpu_clean_display(vgpu);
@@ -518,8 +520,7 @@ void intel_gvt_reset_vgpu_locked(struct intel_vgpu *vgpu, bool dmlr,
518 intel_vgpu_reset_submission(vgpu, resetting_eng); 520 intel_vgpu_reset_submission(vgpu, resetting_eng);
519 /* full GPU reset or device model level reset */ 521 /* full GPU reset or device model level reset */
520 if (engine_mask == ALL_ENGINES || dmlr) { 522 if (engine_mask == ALL_ENGINES || dmlr) {
521 intel_vgpu_select_submission_ops(vgpu, 0); 523 intel_vgpu_select_submission_ops(vgpu, ALL_ENGINES, 0);
522
523 /*fence will not be reset during virtual reset */ 524 /*fence will not be reset during virtual reset */
524 if (dmlr) { 525 if (dmlr) {
525 intel_vgpu_reset_gtt(vgpu); 526 intel_vgpu_reset_gtt(vgpu);
diff --git a/drivers/gpu/drm/i915/i915_cmd_parser.c b/drivers/gpu/drm/i915/i915_cmd_parser.c
index ccb5ba043b63..95478db9998b 100644
--- a/drivers/gpu/drm/i915/i915_cmd_parser.c
+++ b/drivers/gpu/drm/i915/i915_cmd_parser.c
@@ -1032,7 +1032,7 @@ find_reg(const struct intel_engine_cs *engine, bool is_master, u32 addr)
1032 const struct drm_i915_reg_table *table = engine->reg_tables; 1032 const struct drm_i915_reg_table *table = engine->reg_tables;
1033 int count = engine->reg_table_count; 1033 int count = engine->reg_table_count;
1034 1034
1035 do { 1035 for (; count > 0; ++table, --count) {
1036 if (!table->master || is_master) { 1036 if (!table->master || is_master) {
1037 const struct drm_i915_reg_descriptor *reg; 1037 const struct drm_i915_reg_descriptor *reg;
1038 1038
@@ -1040,7 +1040,7 @@ find_reg(const struct intel_engine_cs *engine, bool is_master, u32 addr)
1040 if (reg != NULL) 1040 if (reg != NULL)
1041 return reg; 1041 return reg;
1042 } 1042 }
1043 } while (table++, --count); 1043 }
1044 1044
1045 return NULL; 1045 return NULL;
1046} 1046}
@@ -1212,6 +1212,12 @@ static bool check_cmd(const struct intel_engine_cs *engine,
1212 continue; 1212 continue;
1213 } 1213 }
1214 1214
1215 if (desc->bits[i].offset >= length) {
1216 DRM_DEBUG_DRIVER("CMD: Rejected command 0x%08X, too short to check bitmask (%s)\n",
1217 *cmd, engine->name);
1218 return false;
1219 }
1220
1215 dword = cmd[desc->bits[i].offset] & 1221 dword = cmd[desc->bits[i].offset] &
1216 desc->bits[i].mask; 1222 desc->bits[i].mask;
1217 1223
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 6c8da9d20c33..173d0095e3b2 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -1842,6 +1842,8 @@ static int i915_drm_resume_early(struct drm_device *dev)
1842 if (IS_GEN9_LP(dev_priv) || 1842 if (IS_GEN9_LP(dev_priv) ||
1843 !(dev_priv->suspended_to_idle && dev_priv->csr.dmc_payload)) 1843 !(dev_priv->suspended_to_idle && dev_priv->csr.dmc_payload))
1844 intel_power_domains_init_hw(dev_priv, true); 1844 intel_power_domains_init_hw(dev_priv, true);
1845 else
1846 intel_display_set_init_power(dev_priv, true);
1845 1847
1846 i915_gem_sanitize(dev_priv); 1848 i915_gem_sanitize(dev_priv);
1847 1849
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index caebd5825279..a42deebedb0f 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -3717,7 +3717,11 @@ extern void intel_display_print_error_state(struct drm_i915_error_state_buf *e,
3717 struct intel_display_error_state *error); 3717 struct intel_display_error_state *error);
3718 3718
3719int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u32 mbox, u32 *val); 3719int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u32 mbox, u32 *val);
3720int sandybridge_pcode_write(struct drm_i915_private *dev_priv, u32 mbox, u32 val); 3720int sandybridge_pcode_write_timeout(struct drm_i915_private *dev_priv, u32 mbox,
3721 u32 val, int timeout_us);
3722#define sandybridge_pcode_write(dev_priv, mbox, val) \
3723 sandybridge_pcode_write_timeout(dev_priv, mbox, val, 500)
3724
3721int skl_pcode_request(struct drm_i915_private *dev_priv, u32 mbox, u32 request, 3725int skl_pcode_request(struct drm_i915_private *dev_priv, u32 mbox, u32 request,
3722 u32 reply_mask, u32 reply, int timeout_base_ms); 3726 u32 reply_mask, u32 reply, int timeout_base_ms);
3723 3727
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 8bc3283484be..dd89abd2263d 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -3323,16 +3323,15 @@ i915_gem_retire_work_handler(struct work_struct *work)
3323 mutex_unlock(&dev->struct_mutex); 3323 mutex_unlock(&dev->struct_mutex);
3324 } 3324 }
3325 3325
3326 /* Keep the retire handler running until we are finally idle. 3326 /*
3327 * Keep the retire handler running until we are finally idle.
3327 * We do not need to do this test under locking as in the worst-case 3328 * We do not need to do this test under locking as in the worst-case
3328 * we queue the retire worker once too often. 3329 * we queue the retire worker once too often.
3329 */ 3330 */
3330 if (READ_ONCE(dev_priv->gt.awake)) { 3331 if (READ_ONCE(dev_priv->gt.awake))
3331 i915_queue_hangcheck(dev_priv);
3332 queue_delayed_work(dev_priv->wq, 3332 queue_delayed_work(dev_priv->wq,
3333 &dev_priv->gt.retire_work, 3333 &dev_priv->gt.retire_work,
3334 round_jiffies_up_relative(HZ)); 3334 round_jiffies_up_relative(HZ));
3335 }
3336} 3335}
3337 3336
3338static inline bool 3337static inline bool
@@ -5283,6 +5282,8 @@ err_unlock:
5283 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); 5282 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
5284 mutex_unlock(&dev_priv->drm.struct_mutex); 5283 mutex_unlock(&dev_priv->drm.struct_mutex);
5285 5284
5285 intel_uc_fini_wq(dev_priv);
5286
5286 if (ret != -EIO) 5287 if (ret != -EIO)
5287 i915_gem_cleanup_userptr(dev_priv); 5288 i915_gem_cleanup_userptr(dev_priv);
5288 5289
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index c5f393870532..7e403eaa9e0f 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -377,6 +377,7 @@ static gen6_pte_t iris_pte_encode(dma_addr_t addr,
377static struct page *vm_alloc_page(struct i915_address_space *vm, gfp_t gfp) 377static struct page *vm_alloc_page(struct i915_address_space *vm, gfp_t gfp)
378{ 378{
379 struct pagevec *pvec = &vm->free_pages; 379 struct pagevec *pvec = &vm->free_pages;
380 struct pagevec stash;
380 381
381 if (I915_SELFTEST_ONLY(should_fail(&vm->fault_attr, 1))) 382 if (I915_SELFTEST_ONLY(should_fail(&vm->fault_attr, 1)))
382 i915_gem_shrink_all(vm->i915); 383 i915_gem_shrink_all(vm->i915);
@@ -395,7 +396,15 @@ static struct page *vm_alloc_page(struct i915_address_space *vm, gfp_t gfp)
395 if (likely(pvec->nr)) 396 if (likely(pvec->nr))
396 return pvec->pages[--pvec->nr]; 397 return pvec->pages[--pvec->nr];
397 398
398 /* Otherwise batch allocate pages to amoritize cost of set_pages_wc. */ 399 /*
400 * Otherwise batch allocate pages to amoritize cost of set_pages_wc.
401 *
402 * We have to be careful as page allocation may trigger the shrinker
403 * (via direct reclaim) which will fill up the WC stash underneath us.
404 * So we add our WB pages into a temporary pvec on the stack and merge
405 * them into the WC stash after all the allocations are complete.
406 */
407 pagevec_init(&stash);
399 do { 408 do {
400 struct page *page; 409 struct page *page;
401 410
@@ -403,15 +412,24 @@ static struct page *vm_alloc_page(struct i915_address_space *vm, gfp_t gfp)
403 if (unlikely(!page)) 412 if (unlikely(!page))
404 break; 413 break;
405 414
406 pvec->pages[pvec->nr++] = page; 415 stash.pages[stash.nr++] = page;
407 } while (pagevec_space(pvec)); 416 } while (stash.nr < pagevec_space(pvec));
408 417
409 if (unlikely(!pvec->nr)) 418 if (stash.nr) {
410 return NULL; 419 int nr = min_t(int, stash.nr, pagevec_space(pvec));
420 struct page **pages = stash.pages + stash.nr - nr;
411 421
412 set_pages_array_wc(pvec->pages, pvec->nr); 422 if (nr && !set_pages_array_wc(pages, nr)) {
423 memcpy(pvec->pages + pvec->nr,
424 pages, sizeof(pages[0]) * nr);
425 pvec->nr += nr;
426 stash.nr -= nr;
427 }
413 428
414 return pvec->pages[--pvec->nr]; 429 pagevec_release(&stash);
430 }
431
432 return likely(pvec->nr) ? pvec->pages[--pvec->nr] : NULL;
415} 433}
416 434
417static void vm_free_pages_release(struct i915_address_space *vm, 435static void vm_free_pages_release(struct i915_address_space *vm,
@@ -1341,15 +1359,18 @@ static int gen8_ppgtt_alloc_pd(struct i915_address_space *vm,
1341 int count = gen8_pte_count(start, length); 1359 int count = gen8_pte_count(start, length);
1342 1360
1343 if (pt == vm->scratch_pt) { 1361 if (pt == vm->scratch_pt) {
1362 pd->used_pdes++;
1363
1344 pt = alloc_pt(vm); 1364 pt = alloc_pt(vm);
1345 if (IS_ERR(pt)) 1365 if (IS_ERR(pt)) {
1366 pd->used_pdes--;
1346 goto unwind; 1367 goto unwind;
1368 }
1347 1369
1348 if (count < GEN8_PTES || intel_vgpu_active(vm->i915)) 1370 if (count < GEN8_PTES || intel_vgpu_active(vm->i915))
1349 gen8_initialize_pt(vm, pt); 1371 gen8_initialize_pt(vm, pt);
1350 1372
1351 gen8_ppgtt_set_pde(vm, pd, pt, pde); 1373 gen8_ppgtt_set_pde(vm, pd, pt, pde);
1352 pd->used_pdes++;
1353 GEM_BUG_ON(pd->used_pdes > I915_PDES); 1374 GEM_BUG_ON(pd->used_pdes > I915_PDES);
1354 } 1375 }
1355 1376
@@ -1373,13 +1394,16 @@ static int gen8_ppgtt_alloc_pdp(struct i915_address_space *vm,
 
 	gen8_for_each_pdpe(pd, pdp, start, length, pdpe) {
 		if (pd == vm->scratch_pd) {
+			pdp->used_pdpes++;
+
 			pd = alloc_pd(vm);
-			if (IS_ERR(pd))
+			if (IS_ERR(pd)) {
+				pdp->used_pdpes--;
 				goto unwind;
+			}
 
 			gen8_initialize_pd(vm, pd);
 			gen8_ppgtt_set_pdpe(vm, pdp, pd, pdpe);
-			pdp->used_pdpes++;
 			GEM_BUG_ON(pdp->used_pdpes > i915_pdpes_per_pdp(vm));
 
 			mark_tlbs_dirty(i915_vm_to_ppgtt(vm));
@@ -2287,12 +2311,23 @@ static void gen8_check_and_clear_faults(struct drm_i915_private *dev_priv)
 		u32 fault = I915_READ(GEN8_RING_FAULT_REG);
 
 		if (fault & RING_FAULT_VALID) {
+			u32 fault_data0, fault_data1;
+			u64 fault_addr;
+
+			fault_data0 = I915_READ(GEN8_FAULT_TLB_DATA0);
+			fault_data1 = I915_READ(GEN8_FAULT_TLB_DATA1);
+			fault_addr = ((u64)(fault_data1 & FAULT_VA_HIGH_BITS) << 44) |
+				     ((u64)fault_data0 << 12);
+
 			DRM_DEBUG_DRIVER("Unexpected fault\n"
-					 "\tAddr: 0x%08lx\n"
+					 "\tAddr: 0x%08x_%08x\n"
+					 "\tAddress space: %s\n"
 					 "\tEngine ID: %d\n"
 					 "\tSource ID: %d\n"
 					 "\tType: %d\n",
-					 fault & PAGE_MASK,
+					 upper_32_bits(fault_addr),
+					 lower_32_bits(fault_addr),
+					 fault_data1 & FAULT_GTT_SEL ? "GGTT" : "PPGTT",
 					 GEN8_RING_FAULT_ENGINE_ID(fault),
 					 RING_FAULT_SRCID(fault),
 					 RING_FAULT_FAULT_TYPE(fault));
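Note on the fault decode added above: the faulting GPU address is stitched together from the two TLB data registers, with DATA0 supplying bits 43:12 and the low nibble of DATA1 supplying bits 47:44. A standalone sketch of that arithmetic (the helper name and example values are illustrative only; the register layout is exactly what the hunk assumes):

static u64 gen8_fault_addr_sketch(u32 fault_data0, u32 fault_data1)
{
	/* low 4 bits of DATA1 -> VA[47:44], all of DATA0 -> VA[43:12] */
	return ((u64)(fault_data1 & FAULT_VA_HIGH_BITS) << 44) |
	       ((u64)fault_data0 << 12);
}

/* e.g. fault_data0 = 0x00012345, fault_data1 = 0x01 yields
 * 0x0000100012345000, printed by the new message as 0x00001000_12345000. */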
diff --git a/drivers/gpu/drm/i915/i915_gem_request.c b/drivers/gpu/drm/i915/i915_gem_request.c
index d575109f7a7f..e09d18df8b7f 100644
--- a/drivers/gpu/drm/i915/i915_gem_request.c
+++ b/drivers/gpu/drm/i915/i915_gem_request.c
@@ -276,6 +276,8 @@ static void mark_busy(struct drm_i915_private *i915)
 
 	intel_engines_unpark(i915);
 
+	i915_queue_hangcheck(i915);
+
 	queue_delayed_work(i915->wq,
 			   &i915->gt.retire_work,
 			   round_jiffies_up_relative(HZ));
diff --git a/drivers/gpu/drm/i915/i915_gem_shrinker.c b/drivers/gpu/drm/i915/i915_gem_shrinker.c
index 9029ed04879c..0e158f9287c4 100644
--- a/drivers/gpu/drm/i915/i915_gem_shrinker.c
+++ b/drivers/gpu/drm/i915/i915_gem_shrinker.c
@@ -363,13 +363,13 @@ i915_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc)
 				I915_SHRINK_BOUND |
 				I915_SHRINK_UNBOUND |
 				I915_SHRINK_PURGEABLE);
-	if (freed < sc->nr_to_scan)
+	if (sc->nr_scanned < sc->nr_to_scan)
 		freed += i915_gem_shrink(i915,
 					 sc->nr_to_scan - sc->nr_scanned,
 					 &sc->nr_scanned,
 					 I915_SHRINK_BOUND |
 					 I915_SHRINK_UNBOUND);
-	if (freed < sc->nr_to_scan && current_is_kswapd()) {
+	if (sc->nr_scanned < sc->nr_to_scan && current_is_kswapd()) {
 		intel_runtime_pm_get(i915);
 		freed += i915_gem_shrink(i915,
 					 sc->nr_to_scan - sc->nr_scanned,
diff --git a/drivers/gpu/drm/i915/i915_pci.c b/drivers/gpu/drm/i915/i915_pci.c
index 36d48422b475..1c30c688f23a 100644
--- a/drivers/gpu/drm/i915/i915_pci.c
+++ b/drivers/gpu/drm/i915/i915_pci.c
@@ -74,19 +74,19 @@
 	GEN_DEFAULT_PAGE_SIZES, \
 	CURSOR_OFFSETS
 
-static const struct intel_device_info intel_i830_info __initconst = {
+static const struct intel_device_info intel_i830_info = {
 	GEN2_FEATURES,
 	.platform = INTEL_I830,
 	.is_mobile = 1, .cursor_needs_physical = 1,
 	.num_pipes = 2, /* legal, last one wins */
 };
 
-static const struct intel_device_info intel_i845g_info __initconst = {
+static const struct intel_device_info intel_i845g_info = {
 	GEN2_FEATURES,
 	.platform = INTEL_I845G,
 };
 
-static const struct intel_device_info intel_i85x_info __initconst = {
+static const struct intel_device_info intel_i85x_info = {
 	GEN2_FEATURES,
 	.platform = INTEL_I85X, .is_mobile = 1,
 	.num_pipes = 2, /* legal, last one wins */
@@ -94,7 +94,7 @@ static const struct intel_device_info intel_i85x_info __initconst = {
 	.has_fbc = 1,
 };
 
-static const struct intel_device_info intel_i865g_info __initconst = {
+static const struct intel_device_info intel_i865g_info = {
 	GEN2_FEATURES,
 	.platform = INTEL_I865G,
 };
@@ -108,7 +108,7 @@ static const struct intel_device_info intel_i865g_info __initconst = {
 	GEN_DEFAULT_PAGE_SIZES, \
 	CURSOR_OFFSETS
 
-static const struct intel_device_info intel_i915g_info __initconst = {
+static const struct intel_device_info intel_i915g_info = {
 	GEN3_FEATURES,
 	.platform = INTEL_I915G, .cursor_needs_physical = 1,
 	.has_overlay = 1, .overlay_needs_physical = 1,
@@ -116,7 +116,7 @@ static const struct intel_device_info intel_i915g_info __initconst = {
 	.unfenced_needs_alignment = 1,
 };
 
-static const struct intel_device_info intel_i915gm_info __initconst = {
+static const struct intel_device_info intel_i915gm_info = {
 	GEN3_FEATURES,
 	.platform = INTEL_I915GM,
 	.is_mobile = 1,
@@ -128,7 +128,7 @@ static const struct intel_device_info intel_i915gm_info __initconst = {
 	.unfenced_needs_alignment = 1,
 };
 
-static const struct intel_device_info intel_i945g_info __initconst = {
+static const struct intel_device_info intel_i945g_info = {
 	GEN3_FEATURES,
 	.platform = INTEL_I945G,
 	.has_hotplug = 1, .cursor_needs_physical = 1,
@@ -137,7 +137,7 @@ static const struct intel_device_info intel_i945g_info __initconst = {
 	.unfenced_needs_alignment = 1,
 };
 
-static const struct intel_device_info intel_i945gm_info __initconst = {
+static const struct intel_device_info intel_i945gm_info = {
 	GEN3_FEATURES,
 	.platform = INTEL_I945GM, .is_mobile = 1,
 	.has_hotplug = 1, .cursor_needs_physical = 1,
@@ -148,14 +148,14 @@ static const struct intel_device_info intel_i945gm_info __initconst = {
 	.unfenced_needs_alignment = 1,
 };
 
-static const struct intel_device_info intel_g33_info __initconst = {
+static const struct intel_device_info intel_g33_info = {
 	GEN3_FEATURES,
 	.platform = INTEL_G33,
 	.has_hotplug = 1,
 	.has_overlay = 1,
 };
 
-static const struct intel_device_info intel_pineview_info __initconst = {
+static const struct intel_device_info intel_pineview_info = {
 	GEN3_FEATURES,
 	.platform = INTEL_PINEVIEW, .is_mobile = 1,
 	.has_hotplug = 1,
@@ -172,7 +172,7 @@ static const struct intel_device_info intel_pineview_info __initconst = {
 	GEN_DEFAULT_PAGE_SIZES, \
 	CURSOR_OFFSETS
 
-static const struct intel_device_info intel_i965g_info __initconst = {
+static const struct intel_device_info intel_i965g_info = {
 	GEN4_FEATURES,
 	.platform = INTEL_I965G,
 	.has_overlay = 1,
@@ -180,7 +180,7 @@ static const struct intel_device_info intel_i965g_info __initconst = {
 	.has_snoop = false,
 };
 
-static const struct intel_device_info intel_i965gm_info __initconst = {
+static const struct intel_device_info intel_i965gm_info = {
 	GEN4_FEATURES,
 	.platform = INTEL_I965GM,
 	.is_mobile = 1, .has_fbc = 1,
@@ -190,13 +190,13 @@ static const struct intel_device_info intel_i965gm_info __initconst = {
 	.has_snoop = false,
 };
 
-static const struct intel_device_info intel_g45_info __initconst = {
+static const struct intel_device_info intel_g45_info = {
 	GEN4_FEATURES,
 	.platform = INTEL_G45,
 	.ring_mask = RENDER_RING | BSD_RING,
 };
 
-static const struct intel_device_info intel_gm45_info __initconst = {
+static const struct intel_device_info intel_gm45_info = {
 	GEN4_FEATURES,
 	.platform = INTEL_GM45,
 	.is_mobile = 1, .has_fbc = 1,
@@ -215,12 +215,12 @@ static const struct intel_device_info intel_gm45_info __initconst = {
 	GEN_DEFAULT_PAGE_SIZES, \
 	CURSOR_OFFSETS
 
-static const struct intel_device_info intel_ironlake_d_info __initconst = {
+static const struct intel_device_info intel_ironlake_d_info = {
 	GEN5_FEATURES,
 	.platform = INTEL_IRONLAKE,
 };
 
-static const struct intel_device_info intel_ironlake_m_info __initconst = {
+static const struct intel_device_info intel_ironlake_m_info = {
 	GEN5_FEATURES,
 	.platform = INTEL_IRONLAKE,
 	.is_mobile = 1, .has_fbc = 1,
@@ -243,12 +243,12 @@ static const struct intel_device_info intel_ironlake_m_info __initconst = {
 	GEN6_FEATURES, \
 	.platform = INTEL_SANDYBRIDGE
 
-static const struct intel_device_info intel_sandybridge_d_gt1_info __initconst = {
+static const struct intel_device_info intel_sandybridge_d_gt1_info = {
 	SNB_D_PLATFORM,
 	.gt = 1,
 };
 
-static const struct intel_device_info intel_sandybridge_d_gt2_info __initconst = {
+static const struct intel_device_info intel_sandybridge_d_gt2_info = {
 	SNB_D_PLATFORM,
 	.gt = 2,
 };
@@ -259,12 +259,12 @@ static const struct intel_device_info intel_sandybridge_d_gt2_info __initconst =
 	.is_mobile = 1
 
 
-static const struct intel_device_info intel_sandybridge_m_gt1_info __initconst = {
+static const struct intel_device_info intel_sandybridge_m_gt1_info = {
 	SNB_M_PLATFORM,
 	.gt = 1,
 };
 
-static const struct intel_device_info intel_sandybridge_m_gt2_info __initconst = {
+static const struct intel_device_info intel_sandybridge_m_gt2_info = {
 	SNB_M_PLATFORM,
 	.gt = 2,
 };
@@ -288,12 +288,12 @@ static const struct intel_device_info intel_sandybridge_m_gt2_info __initconst =
 	.platform = INTEL_IVYBRIDGE, \
 	.has_l3_dpf = 1
 
-static const struct intel_device_info intel_ivybridge_d_gt1_info __initconst = {
+static const struct intel_device_info intel_ivybridge_d_gt1_info = {
 	IVB_D_PLATFORM,
 	.gt = 1,
 };
 
-static const struct intel_device_info intel_ivybridge_d_gt2_info __initconst = {
+static const struct intel_device_info intel_ivybridge_d_gt2_info = {
 	IVB_D_PLATFORM,
 	.gt = 2,
 };
@@ -304,17 +304,17 @@ static const struct intel_device_info intel_ivybridge_d_gt2_info __initconst = {
 	.is_mobile = 1, \
 	.has_l3_dpf = 1
 
-static const struct intel_device_info intel_ivybridge_m_gt1_info __initconst = {
+static const struct intel_device_info intel_ivybridge_m_gt1_info = {
 	IVB_M_PLATFORM,
 	.gt = 1,
 };
 
-static const struct intel_device_info intel_ivybridge_m_gt2_info __initconst = {
+static const struct intel_device_info intel_ivybridge_m_gt2_info = {
 	IVB_M_PLATFORM,
 	.gt = 2,
 };
 
-static const struct intel_device_info intel_ivybridge_q_info __initconst = {
+static const struct intel_device_info intel_ivybridge_q_info = {
 	GEN7_FEATURES,
 	.platform = INTEL_IVYBRIDGE,
 	.gt = 2,
@@ -322,7 +322,7 @@ static const struct intel_device_info intel_ivybridge_q_info __initconst = {
 	.has_l3_dpf = 1,
 };
 
-static const struct intel_device_info intel_valleyview_info __initconst = {
+static const struct intel_device_info intel_valleyview_info = {
 	.platform = INTEL_VALLEYVIEW,
 	.gen = 7,
 	.is_lp = 1,
@@ -358,17 +358,17 @@ static const struct intel_device_info intel_valleyview_info __initconst = {
 	.platform = INTEL_HASWELL, \
 	.has_l3_dpf = 1
 
-static const struct intel_device_info intel_haswell_gt1_info __initconst = {
+static const struct intel_device_info intel_haswell_gt1_info = {
 	HSW_PLATFORM,
 	.gt = 1,
 };
 
-static const struct intel_device_info intel_haswell_gt2_info __initconst = {
+static const struct intel_device_info intel_haswell_gt2_info = {
 	HSW_PLATFORM,
 	.gt = 2,
 };
 
-static const struct intel_device_info intel_haswell_gt3_info __initconst = {
+static const struct intel_device_info intel_haswell_gt3_info = {
 	HSW_PLATFORM,
 	.gt = 3,
 };
@@ -388,17 +388,17 @@ static const struct intel_device_info intel_haswell_gt3_info __initconst = {
 	.gen = 8, \
 	.platform = INTEL_BROADWELL
 
-static const struct intel_device_info intel_broadwell_gt1_info __initconst = {
+static const struct intel_device_info intel_broadwell_gt1_info = {
 	BDW_PLATFORM,
 	.gt = 1,
 };
 
-static const struct intel_device_info intel_broadwell_gt2_info __initconst = {
+static const struct intel_device_info intel_broadwell_gt2_info = {
 	BDW_PLATFORM,
 	.gt = 2,
 };
 
-static const struct intel_device_info intel_broadwell_rsvd_info __initconst = {
+static const struct intel_device_info intel_broadwell_rsvd_info = {
 	BDW_PLATFORM,
 	.gt = 3,
 	/* According to the device ID those devices are GT3, they were
@@ -406,13 +406,13 @@ static const struct intel_device_info intel_broadwell_rsvd_info __initconst = {
 	 */
 };
 
-static const struct intel_device_info intel_broadwell_gt3_info __initconst = {
+static const struct intel_device_info intel_broadwell_gt3_info = {
 	BDW_PLATFORM,
 	.gt = 3,
 	.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING,
 };
 
-static const struct intel_device_info intel_cherryview_info __initconst = {
+static const struct intel_device_info intel_cherryview_info = {
 	.gen = 8, .num_pipes = 3,
 	.has_hotplug = 1,
 	.is_lp = 1,
@@ -455,12 +455,12 @@ static const struct intel_device_info intel_cherryview_info __initconst = {
 	.gen = 9, \
 	.platform = INTEL_SKYLAKE
 
-static const struct intel_device_info intel_skylake_gt1_info __initconst = {
+static const struct intel_device_info intel_skylake_gt1_info = {
 	SKL_PLATFORM,
 	.gt = 1,
 };
 
-static const struct intel_device_info intel_skylake_gt2_info __initconst = {
+static const struct intel_device_info intel_skylake_gt2_info = {
 	SKL_PLATFORM,
 	.gt = 2,
 };
@@ -470,12 +470,12 @@ static const struct intel_device_info intel_skylake_gt2_info __initconst = {
 	.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING
 
 
-static const struct intel_device_info intel_skylake_gt3_info __initconst = {
+static const struct intel_device_info intel_skylake_gt3_info = {
 	SKL_GT3_PLUS_PLATFORM,
 	.gt = 3,
 };
 
-static const struct intel_device_info intel_skylake_gt4_info __initconst = {
+static const struct intel_device_info intel_skylake_gt4_info = {
 	SKL_GT3_PLUS_PLATFORM,
 	.gt = 4,
 };
@@ -511,13 +511,13 @@ static const struct intel_device_info intel_skylake_gt4_info __initconst = {
 	IVB_CURSOR_OFFSETS, \
 	BDW_COLORS
 
-static const struct intel_device_info intel_broxton_info __initconst = {
+static const struct intel_device_info intel_broxton_info = {
 	GEN9_LP_FEATURES,
 	.platform = INTEL_BROXTON,
 	.ddb_size = 512,
 };
 
-static const struct intel_device_info intel_geminilake_info __initconst = {
+static const struct intel_device_info intel_geminilake_info = {
 	GEN9_LP_FEATURES,
 	.platform = INTEL_GEMINILAKE,
 	.ddb_size = 1024,
@@ -529,17 +529,17 @@ static const struct intel_device_info intel_geminilake_info __initconst = {
 	.gen = 9, \
 	.platform = INTEL_KABYLAKE
 
-static const struct intel_device_info intel_kabylake_gt1_info __initconst = {
+static const struct intel_device_info intel_kabylake_gt1_info = {
 	KBL_PLATFORM,
 	.gt = 1,
 };
 
-static const struct intel_device_info intel_kabylake_gt2_info __initconst = {
+static const struct intel_device_info intel_kabylake_gt2_info = {
 	KBL_PLATFORM,
 	.gt = 2,
 };
 
-static const struct intel_device_info intel_kabylake_gt3_info __initconst = {
+static const struct intel_device_info intel_kabylake_gt3_info = {
 	KBL_PLATFORM,
 	.gt = 3,
 	.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING,
@@ -550,17 +550,17 @@ static const struct intel_device_info intel_kabylake_gt3_info __initconst = {
 	.gen = 9, \
 	.platform = INTEL_COFFEELAKE
 
-static const struct intel_device_info intel_coffeelake_gt1_info __initconst = {
+static const struct intel_device_info intel_coffeelake_gt1_info = {
 	CFL_PLATFORM,
 	.gt = 1,
 };
 
-static const struct intel_device_info intel_coffeelake_gt2_info __initconst = {
+static const struct intel_device_info intel_coffeelake_gt2_info = {
 	CFL_PLATFORM,
 	.gt = 2,
 };
 
-static const struct intel_device_info intel_coffeelake_gt3_info __initconst = {
+static const struct intel_device_info intel_coffeelake_gt3_info = {
 	CFL_PLATFORM,
 	.gt = 3,
 	.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING,
@@ -571,7 +571,7 @@ static const struct intel_device_info intel_coffeelake_gt3_info __initconst = {
 	.ddb_size = 1024, \
 	GLK_COLORS
 
-static const struct intel_device_info intel_cannonlake_gt2_info __initconst = {
+static const struct intel_device_info intel_cannonlake_gt2_info = {
 	GEN10_FEATURES,
 	.is_alpha_support = 1,
 	.platform = INTEL_CANNONLAKE,
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 505c605eff98..a2108e35c599 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -2489,6 +2489,8 @@ enum i915_power_well_id {
 
 #define GEN8_FAULT_TLB_DATA0		_MMIO(0x4b10)
 #define GEN8_FAULT_TLB_DATA1		_MMIO(0x4b14)
+#define   FAULT_VA_HIGH_BITS		(0xf << 0)
+#define   FAULT_GTT_SEL			(1 << 4)
 
 #define FPGA_DBG			_MMIO(0x42300)
 #define   FPGA_DBG_RM_NOCLAIM		(1<<31)
diff --git a/drivers/gpu/drm/i915/intel_audio.c b/drivers/gpu/drm/i915/intel_audio.c
index f1502a0188eb..522d54fecb53 100644
--- a/drivers/gpu/drm/i915/intel_audio.c
+++ b/drivers/gpu/drm/i915/intel_audio.c
@@ -779,7 +779,7 @@ static struct intel_encoder *get_saved_enc(struct drm_i915_private *dev_priv,
 {
 	struct intel_encoder *encoder;
 
-	if (WARN_ON(pipe >= INTEL_INFO(dev_priv)->num_pipes))
+	if (WARN_ON(pipe >= ARRAY_SIZE(dev_priv->av_enc_map)))
 		return NULL;
 
 	/* MST */
diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c
index 51108ffc28d1..f7f771749e48 100644
--- a/drivers/gpu/drm/i915/intel_bios.c
+++ b/drivers/gpu/drm/i915/intel_bios.c
@@ -1107,6 +1107,7 @@ static void sanitize_aux_ch(struct drm_i915_private *dev_priv,
 }
 
 static const u8 cnp_ddc_pin_map[] = {
+	[0] = 0, /* N/A */
 	[DDC_BUS_DDI_B] = GMBUS_PIN_1_BXT,
 	[DDC_BUS_DDI_C] = GMBUS_PIN_2_BXT,
 	[DDC_BUS_DDI_D] = GMBUS_PIN_4_CNP, /* sic */
@@ -1115,9 +1116,14 @@ static const u8 cnp_ddc_pin_map[] = {
 
 static u8 map_ddc_pin(struct drm_i915_private *dev_priv, u8 vbt_pin)
 {
-	if (HAS_PCH_CNP(dev_priv) &&
-	    vbt_pin > 0 && vbt_pin < ARRAY_SIZE(cnp_ddc_pin_map))
-		return cnp_ddc_pin_map[vbt_pin];
+	if (HAS_PCH_CNP(dev_priv)) {
+		if (vbt_pin < ARRAY_SIZE(cnp_ddc_pin_map)) {
+			return cnp_ddc_pin_map[vbt_pin];
+		} else {
+			DRM_DEBUG_KMS("Ignoring alternate pin: VBT claims DDC pin %d, which is not valid for this platform\n", vbt_pin);
+			return 0;
+		}
+	}
 
 	return vbt_pin;
 }
@@ -1323,11 +1329,13 @@ parse_general_definitions(struct drm_i915_private *dev_priv,
 		expected_size = LEGACY_CHILD_DEVICE_CONFIG_SIZE;
 	} else if (bdb->version == 195) {
 		expected_size = 37;
-	} else if (bdb->version <= 197) {
+	} else if (bdb->version <= 215) {
 		expected_size = 38;
+	} else if (bdb->version <= 216) {
+		expected_size = 39;
 	} else {
-		expected_size = 38;
-		BUILD_BUG_ON(sizeof(*child) < 38);
+		expected_size = sizeof(*child);
+		BUILD_BUG_ON(sizeof(*child) < 39);
 		DRM_DEBUG_DRIVER("Expected child device config size for VBT version %u not known; assuming %u\n",
 				 bdb->version, expected_size);
 	}
diff --git a/drivers/gpu/drm/i915/intel_breadcrumbs.c b/drivers/gpu/drm/i915/intel_breadcrumbs.c
index 58c624f982d9..bd40fea16b4f 100644
--- a/drivers/gpu/drm/i915/intel_breadcrumbs.c
+++ b/drivers/gpu/drm/i915/intel_breadcrumbs.c
@@ -149,17 +149,6 @@ static void intel_breadcrumbs_fake_irq(struct timer_list *t)
 		return;
 
 	mod_timer(&b->fake_irq, jiffies + 1);
-
-	/* Ensure that even if the GPU hangs, we get woken up.
-	 *
-	 * However, note that if no one is waiting, we never notice
-	 * a gpu hang. Eventually, we will have to wait for a resource
-	 * held by the GPU and so trigger a hangcheck. In the most
-	 * pathological case, this will be upon memory starvation! To
-	 * prevent this, we also queue the hangcheck from the retire
-	 * worker.
-	 */
-	i915_queue_hangcheck(engine->i915);
 }
 
 static void irq_enable(struct intel_engine_cs *engine)
diff --git a/drivers/gpu/drm/i915/intel_cdclk.c b/drivers/gpu/drm/i915/intel_cdclk.c
index d77e2bec1e29..5dc118f26b51 100644
--- a/drivers/gpu/drm/i915/intel_cdclk.c
+++ b/drivers/gpu/drm/i915/intel_cdclk.c
@@ -1370,10 +1370,15 @@ static void bxt_set_cdclk(struct drm_i915_private *dev_priv,
 		break;
 	}
 
-	/* Inform power controller of upcoming frequency change */
+	/*
+	 * Inform power controller of upcoming frequency change. BSpec
+	 * requires us to wait up to 150usec, but that leads to timeouts;
+	 * the 2ms used here is based on experiment.
+	 */
 	mutex_lock(&dev_priv->pcu_lock);
-	ret = sandybridge_pcode_write(dev_priv, HSW_PCODE_DE_WRITE_FREQ_REQ,
-				      0x80000000);
+	ret = sandybridge_pcode_write_timeout(dev_priv,
+					      HSW_PCODE_DE_WRITE_FREQ_REQ,
+					      0x80000000, 2000);
 	mutex_unlock(&dev_priv->pcu_lock);
 
 	if (ret) {
@@ -1404,8 +1409,15 @@ static void bxt_set_cdclk(struct drm_i915_private *dev_priv,
 	I915_WRITE(CDCLK_CTL, val);
 
 	mutex_lock(&dev_priv->pcu_lock);
-	ret = sandybridge_pcode_write(dev_priv, HSW_PCODE_DE_WRITE_FREQ_REQ,
-				      cdclk_state->voltage_level);
+	/*
+	 * The timeout isn't specified, the 2ms used here is based on
+	 * experiment.
+	 * FIXME: Waiting for the request completion could be delayed until
+	 * the next PCODE request based on BSpec.
+	 */
+	ret = sandybridge_pcode_write_timeout(dev_priv,
+					      HSW_PCODE_DE_WRITE_FREQ_REQ,
+					      cdclk_state->voltage_level, 2000);
 	mutex_unlock(&dev_priv->pcu_lock);
 
 	if (ret) {
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 0cd355978ab4..f288bcc7be22 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -5661,8 +5661,8 @@ static u64 get_crtc_power_domains(struct drm_crtc *crtc,
 	if (!crtc_state->base.active)
 		return 0;
 
-	mask = BIT(POWER_DOMAIN_PIPE(pipe));
-	mask |= BIT(POWER_DOMAIN_TRANSCODER(transcoder));
+	mask = BIT_ULL(POWER_DOMAIN_PIPE(pipe));
+	mask |= BIT_ULL(POWER_DOMAIN_TRANSCODER(transcoder));
 	if (crtc_state->pch_pfit.enabled ||
 	    crtc_state->pch_pfit.force_thru)
 		mask |= BIT_ULL(POWER_DOMAIN_PIPE_PANEL_FITTER(pipe));
@@ -5674,7 +5674,7 @@ static u64 get_crtc_power_domains(struct drm_crtc *crtc,
 	}
 
 	if (HAS_DDI(dev_priv) && crtc_state->has_audio)
-		mask |= BIT(POWER_DOMAIN_AUDIO);
+		mask |= BIT_ULL(POWER_DOMAIN_AUDIO);
 
 	if (crtc_state->shared_dpll)
 		mask |= BIT_ULL(POWER_DOMAIN_PLLS);
diff --git a/drivers/gpu/drm/i915/intel_dp_link_training.c b/drivers/gpu/drm/i915/intel_dp_link_training.c
index 05907fa8a553..cf8fef8b6f58 100644
--- a/drivers/gpu/drm/i915/intel_dp_link_training.c
+++ b/drivers/gpu/drm/i915/intel_dp_link_training.c
@@ -328,14 +328,22 @@ intel_dp_start_link_train(struct intel_dp *intel_dp)
 	return;
 
  failure_handling:
-	DRM_DEBUG_KMS("[CONNECTOR:%d:%s] Link Training failed at link rate = %d, lane count = %d",
-		      intel_connector->base.base.id,
-		      intel_connector->base.name,
-		      intel_dp->link_rate, intel_dp->lane_count);
-	if (!intel_dp_get_link_train_fallback_values(intel_dp,
-						     intel_dp->link_rate,
-						     intel_dp->lane_count))
-		/* Schedule a Hotplug Uevent to userspace to start modeset */
-		schedule_work(&intel_connector->modeset_retry_work);
+	/* Dont fallback and prune modes if its eDP */
+	if (!intel_dp_is_edp(intel_dp)) {
+		DRM_DEBUG_KMS("[CONNECTOR:%d:%s] Link Training failed at link rate = %d, lane count = %d",
+			      intel_connector->base.base.id,
+			      intel_connector->base.name,
+			      intel_dp->link_rate, intel_dp->lane_count);
+		if (!intel_dp_get_link_train_fallback_values(intel_dp,
+							     intel_dp->link_rate,
+							     intel_dp->lane_count))
+			/* Schedule a Hotplug Uevent to userspace to start modeset */
+			schedule_work(&intel_connector->modeset_retry_work);
+	} else {
+		DRM_ERROR("[CONNECTOR:%d:%s] Link Training failed at link rate = %d, lane count = %d",
+			  intel_connector->base.base.id,
+			  intel_connector->base.name,
+			  intel_dp->link_rate, intel_dp->lane_count);
+	}
 	return;
 }
diff --git a/drivers/gpu/drm/i915/intel_engine_cs.c b/drivers/gpu/drm/i915/intel_engine_cs.c
index 6bb51a502b8b..d790bdc227ff 100644
--- a/drivers/gpu/drm/i915/intel_engine_cs.c
+++ b/drivers/gpu/drm/i915/intel_engine_cs.c
@@ -1951,8 +1951,22 @@ int intel_enable_engine_stats(struct intel_engine_cs *engine)
 	spin_lock_irqsave(&engine->stats.lock, flags);
 	if (engine->stats.enabled == ~0)
 		goto busy;
-	if (engine->stats.enabled++ == 0)
+	if (engine->stats.enabled++ == 0) {
+		struct intel_engine_execlists *execlists = &engine->execlists;
+		const struct execlist_port *port = execlists->port;
+		unsigned int num_ports = execlists_num_ports(execlists);
+
 		engine->stats.enabled_at = ktime_get();
+
+		/* XXX submission method oblivious? */
+		while (num_ports-- && port_isset(port)) {
+			engine->stats.active++;
+			port++;
+		}
+
+		if (engine->stats.active)
+			engine->stats.start = engine->stats.enabled_at;
+	}
 	spin_unlock_irqrestore(&engine->stats.lock, flags);
 
 	return 0;
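Note: seeding engine->stats.active from the execlist ports matters because busyness is accumulated from the enable/start timestamps; if requests are already on the hardware when sampling is switched on, the open busy period has to be counted from enabled_at. A simplified model of that accounting (the structure and helper below are illustrative only, not the driver's actual types):

struct engine_stats_sketch {
	unsigned int active;	/* contexts currently executing */
	u64 start_ns;		/* when the current busy period began */
	u64 accumulated_ns;	/* sum of completed busy periods */
};

static u64 busy_time_ns_sketch(const struct engine_stats_sketch *s, u64 now_ns)
{
	u64 total = s->accumulated_ns;

	if (s->active)				/* an open busy period... */
		total += now_ns - s->start_ns;	/* ...counts up to "now" */

	return total;
}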
diff --git a/drivers/gpu/drm/i915/intel_guc_fw.c b/drivers/gpu/drm/i915/intel_guc_fw.c
index cbc51c960425..3b0932942857 100644
--- a/drivers/gpu/drm/i915/intel_guc_fw.c
+++ b/drivers/gpu/drm/i915/intel_guc_fw.c
@@ -39,9 +39,6 @@
 #define KBL_FW_MAJOR	9
 #define KBL_FW_MINOR	39
 
-#define GLK_FW_MAJOR	10
-#define GLK_FW_MINOR	56
-
 #define GUC_FW_PATH(platform, major, minor) \
 	"i915/" __stringify(platform) "_guc_ver" __stringify(major) "_" __stringify(minor) ".bin"
 
@@ -54,8 +51,6 @@ MODULE_FIRMWARE(I915_BXT_GUC_UCODE);
 #define I915_KBL_GUC_UCODE GUC_FW_PATH(kbl, KBL_FW_MAJOR, KBL_FW_MINOR)
 MODULE_FIRMWARE(I915_KBL_GUC_UCODE);
 
-#define I915_GLK_GUC_UCODE GUC_FW_PATH(glk, GLK_FW_MAJOR, GLK_FW_MINOR)
-
 static void guc_fw_select(struct intel_uc_fw *guc_fw)
 {
 	struct intel_guc *guc = container_of(guc_fw, struct intel_guc, fw);
@@ -82,10 +77,6 @@ static void guc_fw_select(struct intel_uc_fw *guc_fw)
 		guc_fw->path = I915_KBL_GUC_UCODE;
 		guc_fw->major_ver_wanted = KBL_FW_MAJOR;
 		guc_fw->minor_ver_wanted = KBL_FW_MINOR;
-	} else if (IS_GEMINILAKE(dev_priv)) {
-		guc_fw->path = I915_GLK_GUC_UCODE;
-		guc_fw->major_ver_wanted = GLK_FW_MAJOR;
-		guc_fw->minor_ver_wanted = GLK_FW_MINOR;
 	} else {
 		DRM_WARN("%s: No firmware known for this platform!\n",
 			 intel_uc_fw_type_repr(guc_fw->type));
diff --git a/drivers/gpu/drm/i915/intel_hangcheck.c b/drivers/gpu/drm/i915/intel_hangcheck.c
index 31f01d64c021..348a4f7ffb67 100644
--- a/drivers/gpu/drm/i915/intel_hangcheck.c
+++ b/drivers/gpu/drm/i915/intel_hangcheck.c
@@ -411,7 +411,6 @@ static void i915_hangcheck_elapsed(struct work_struct *work)
 	struct intel_engine_cs *engine;
 	enum intel_engine_id id;
 	unsigned int hung = 0, stuck = 0;
-	int busy_count = 0;
 
 	if (!i915_modparams.enable_hangcheck)
 		return;
@@ -429,7 +428,6 @@ static void i915_hangcheck_elapsed(struct work_struct *work)
 	intel_uncore_arm_unclaimed_mmio_detection(dev_priv);
 
 	for_each_engine(engine, dev_priv, id) {
-		const bool busy = intel_engine_has_waiter(engine);
 		struct intel_engine_hangcheck hc;
 
 		semaphore_clear_deadlocks(dev_priv);
@@ -443,16 +441,13 @@ static void i915_hangcheck_elapsed(struct work_struct *work)
 			if (hc.action != ENGINE_DEAD)
 				stuck |= intel_engine_flag(engine);
 		}
-
-		busy_count += busy;
 	}
 
 	if (hung)
 		hangcheck_declare_hang(dev_priv, hung, stuck);
 
 	/* Reset timer in case GPU hangs without another request being added */
-	if (busy_count)
-		i915_queue_hangcheck(dev_priv);
+	i915_queue_hangcheck(dev_priv);
 }
 
 void intel_engine_init_hangcheck(struct intel_engine_cs *engine)
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
index bced7b954d93..179d0ad3889d 100644
--- a/drivers/gpu/drm/i915/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/intel_hdmi.c
@@ -1595,12 +1595,20 @@ intel_hdmi_set_edid(struct drm_connector *connector)
 	struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector);
 	struct edid *edid;
 	bool connected = false;
+	struct i2c_adapter *i2c;
 
 	intel_display_power_get(dev_priv, POWER_DOMAIN_GMBUS);
 
-	edid = drm_get_edid(connector,
-			    intel_gmbus_get_adapter(dev_priv,
-						    intel_hdmi->ddc_bus));
+	i2c = intel_gmbus_get_adapter(dev_priv, intel_hdmi->ddc_bus);
+
+	edid = drm_get_edid(connector, i2c);
+
+	if (!edid && !intel_gmbus_is_forced_bit(i2c)) {
+		DRM_DEBUG_KMS("HDMI GMBUS EDID read failed, retry using GPIO bit-banging\n");
+		intel_gmbus_force_bit(i2c, true);
+		edid = drm_get_edid(connector, i2c);
+		intel_gmbus_force_bit(i2c, false);
+	}
 
 	intel_hdmi_dp_dual_mode_detect(connector, edid != NULL);
 
diff --git a/drivers/gpu/drm/i915/intel_huc.c b/drivers/gpu/drm/i915/intel_huc.c
index 974be3defa70..8ed05182f944 100644
--- a/drivers/gpu/drm/i915/intel_huc.c
+++ b/drivers/gpu/drm/i915/intel_huc.c
@@ -54,10 +54,6 @@
 #define KBL_HUC_FW_MINOR 00
 #define KBL_BLD_NUM 1810
 
-#define GLK_HUC_FW_MAJOR 02
-#define GLK_HUC_FW_MINOR 00
-#define GLK_BLD_NUM 1748
-
 #define HUC_FW_PATH(platform, major, minor, bld_num) \
 	"i915/" __stringify(platform) "_huc_ver" __stringify(major) "_" \
 	__stringify(minor) "_" __stringify(bld_num) ".bin"
@@ -74,9 +70,6 @@ MODULE_FIRMWARE(I915_BXT_HUC_UCODE);
 				 KBL_HUC_FW_MINOR, KBL_BLD_NUM)
 MODULE_FIRMWARE(I915_KBL_HUC_UCODE);
 
-#define I915_GLK_HUC_UCODE HUC_FW_PATH(glk, GLK_HUC_FW_MAJOR, \
-	GLK_HUC_FW_MINOR, GLK_BLD_NUM)
-
 static void huc_fw_select(struct intel_uc_fw *huc_fw)
 {
 	struct intel_huc *huc = container_of(huc_fw, struct intel_huc, fw);
@@ -103,10 +96,6 @@ static void huc_fw_select(struct intel_uc_fw *huc_fw)
 		huc_fw->path = I915_KBL_HUC_UCODE;
 		huc_fw->major_ver_wanted = KBL_HUC_FW_MAJOR;
 		huc_fw->minor_ver_wanted = KBL_HUC_FW_MINOR;
-	} else if (IS_GEMINILAKE(dev_priv)) {
-		huc_fw->path = I915_GLK_HUC_UCODE;
-		huc_fw->major_ver_wanted = GLK_HUC_FW_MAJOR;
-		huc_fw->minor_ver_wanted = GLK_HUC_FW_MINOR;
 	} else {
 		DRM_WARN("%s: No firmware known for this platform!\n",
 			 intel_uc_fw_type_repr(huc_fw->type));
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index 1db79a860b96..1a6e699e19e0 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -9149,8 +9149,8 @@ int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u32 mbox, u32 *val
 	return 0;
 }
 
-int sandybridge_pcode_write(struct drm_i915_private *dev_priv,
-			    u32 mbox, u32 val)
+int sandybridge_pcode_write_timeout(struct drm_i915_private *dev_priv,
+				    u32 mbox, u32 val, int timeout_us)
 {
 	int status;
 
@@ -9173,7 +9173,7 @@ int sandybridge_pcode_write(struct drm_i915_private *dev_priv,
 
 	if (__intel_wait_for_register_fw(dev_priv,
 					 GEN6_PCODE_MAILBOX, GEN6_PCODE_READY, 0,
-					 500, 0, NULL)) {
+					 timeout_us, 0, NULL)) {
 		DRM_ERROR("timeout waiting for pcode write of 0x%08x to mbox %x to finish for %ps\n",
 			  val, mbox, __builtin_return_address(0));
 		return -ETIMEDOUT;
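Note: with the rename to sandybridge_pcode_write_timeout(), callers that only need the previous behaviour presumably keep a two-argument entry point outside this hunk. One plausible shape for such a shim, shown purely as a sketch (its existence and location are assumptions, not part of this diff), is a thin wrapper preserving the historical 500 usec wait:

static inline int sandybridge_pcode_write(struct drm_i915_private *dev_priv,
					  u32 mbox, u32 val)
{
	/* hypothetical convenience wrapper: keep the old 500us timeout */
	return sandybridge_pcode_write_timeout(dev_priv, mbox, val, 500);
}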
diff --git a/drivers/gpu/drm/i915/intel_uc.c b/drivers/gpu/drm/i915/intel_uc.c
index 907deac6e3fa..d82ca0f438f5 100644
--- a/drivers/gpu/drm/i915/intel_uc.c
+++ b/drivers/gpu/drm/i915/intel_uc.c
@@ -209,8 +209,6 @@ void intel_uc_fini_wq(struct drm_i915_private *dev_priv)
 	if (!USES_GUC(dev_priv))
 		return;
 
-	GEM_BUG_ON(!HAS_GUC(dev_priv));
-
 	intel_guc_fini_wq(&dev_priv->guc);
 }
 
diff --git a/drivers/gpu/drm/i915/intel_vbt_defs.h b/drivers/gpu/drm/i915/intel_vbt_defs.h
index e3d7745a9151..98dff6058d3c 100644
--- a/drivers/gpu/drm/i915/intel_vbt_defs.h
+++ b/drivers/gpu/drm/i915/intel_vbt_defs.h
@@ -412,6 +412,8 @@ struct child_device_config {
 	u16 dp_gpio_pin_num;				/* 195 */
 	u8 dp_iboost_level:4;				/* 196 */
 	u8 hdmi_iboost_level:4;				/* 196 */
+	u8 dp_max_link_rate:2;				/* 216 CNL+ */
+	u8 dp_max_link_rate_reserved:6;			/* 216 */
 } __packed;
 
 struct bdb_general_definitions {
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/fb.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/fb.h
index adb78f7d083a..92be0e5269c6 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/fb.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/fb.h
@@ -75,6 +75,7 @@ int mcp89_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
 int gf100_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
 int gf108_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
 int gk104_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
+int gk110_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
 int gk20a_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
 int gm107_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
 int gm200_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/secboot.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/secboot.h
index 59f3ba551681..b57fe4ae93ba 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/secboot.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/secboot.h
@@ -60,6 +60,7 @@ int nvkm_secboot_reset(struct nvkm_secboot *, unsigned long);
 int gm200_secboot_new(struct nvkm_device *, int, struct nvkm_secboot **);
 int gm20b_secboot_new(struct nvkm_device *, int, struct nvkm_secboot **);
 int gp102_secboot_new(struct nvkm_device *, int, struct nvkm_secboot **);
+int gp108_secboot_new(struct nvkm_device *, int, struct nvkm_secboot **);
 int gp10b_secboot_new(struct nvkm_device *, int, struct nvkm_secboot **);
 
 #endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/therm.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/therm.h
index b1ac47eb786e..9398d9f09339 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/therm.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/therm.h
@@ -46,6 +46,16 @@ enum nvkm_therm_attr_type {
 	NVKM_THERM_ATTR_THRS_SHUTDOWN_HYST = 17,
 };
 
+struct nvkm_therm_clkgate_init {
+	u32 addr;
+	u8  count;
+	u32 data;
+};
+
+struct nvkm_therm_clkgate_pack {
+	const struct nvkm_therm_clkgate_init *init;
+};
+
 struct nvkm_therm {
 	const struct nvkm_therm_func *func;
 	struct nvkm_subdev subdev;
@@ -85,17 +95,24 @@ struct nvkm_therm {
 
 	int (*attr_get)(struct nvkm_therm *, enum nvkm_therm_attr_type);
 	int (*attr_set)(struct nvkm_therm *, enum nvkm_therm_attr_type, int);
+
+	bool clkgating_enabled;
 };
 
 int nvkm_therm_temp_get(struct nvkm_therm *);
 int nvkm_therm_fan_sense(struct nvkm_therm *);
 int nvkm_therm_cstate(struct nvkm_therm *, int, int);
+void nvkm_therm_clkgate_init(struct nvkm_therm *,
+			     const struct nvkm_therm_clkgate_pack *);
+void nvkm_therm_clkgate_enable(struct nvkm_therm *);
+void nvkm_therm_clkgate_fini(struct nvkm_therm *, bool);
 
 int nv40_therm_new(struct nvkm_device *, int, struct nvkm_therm **);
 int nv50_therm_new(struct nvkm_device *, int, struct nvkm_therm **);
 int g84_therm_new(struct nvkm_device *, int, struct nvkm_therm **);
 int gt215_therm_new(struct nvkm_device *, int, struct nvkm_therm **);
 int gf119_therm_new(struct nvkm_device *, int, struct nvkm_therm **);
+int gk104_therm_new(struct nvkm_device *, int, struct nvkm_therm **);
 int gm107_therm_new(struct nvkm_device *, int, struct nvkm_therm **);
 int gm200_therm_new(struct nvkm_device *, int, struct nvkm_therm **);
 int gp100_therm_new(struct nvkm_device *, int, struct nvkm_therm **);
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.h b/drivers/gpu/drm/nouveau/nouveau_bo.h
index 7b5cc5c73d20..be8e00b49cde 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.h
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.h
@@ -105,4 +105,32 @@ nvbo_kmap_obj_iovirtual(struct nouveau_bo *nvbo)
 	return ioptr;
 }
 
+static inline void
+nouveau_bo_unmap_unpin_unref(struct nouveau_bo **pnvbo)
+{
+	if (*pnvbo) {
+		nouveau_bo_unmap(*pnvbo);
+		nouveau_bo_unpin(*pnvbo);
+		nouveau_bo_ref(NULL, pnvbo);
+	}
+}
+
+static inline int
+nouveau_bo_new_pin_map(struct nouveau_cli *cli, u64 size, int align, u32 flags,
+		       struct nouveau_bo **pnvbo)
+{
+	int ret = nouveau_bo_new(cli, size, align, flags,
+				 0, 0, NULL, NULL, pnvbo);
+	if (ret == 0) {
+		ret = nouveau_bo_pin(*pnvbo, flags, true);
+		if (ret == 0) {
+			ret = nouveau_bo_map(*pnvbo);
+			if (ret == 0)
+				return ret;
+			nouveau_bo_unpin(*pnvbo);
+		}
+		nouveau_bo_ref(NULL, pnvbo);
+	}
+	return ret;
+}
 #endif
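Note: the two inline helpers added above pair an allocate+pin+map path with a single teardown call. A minimal usage sketch (the buffer size, alignment and TTM_PL_FLAG_VRAM placement are illustrative assumptions, not taken from this diff):

/* Illustrative only: allocate, pin and CPU-map a small VRAM buffer. */
static int example_alloc_bo(struct nouveau_cli *cli, struct nouveau_bo **pnvbo)
{
	int ret = nouveau_bo_new_pin_map(cli, 4096, 0x100,
					 TTM_PL_FLAG_VRAM, pnvbo);
	if (ret)
		return ret;

	/* ... fill the mapping via nvbo_kmap_obj_iovirtual(*pnvbo) ... */
	return 0;
}

/* teardown later with: nouveau_bo_unmap_unpin_unref(&nvbo); */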
diff --git a/drivers/gpu/drm/nouveau/nouveau_crtc.h b/drivers/gpu/drm/nouveau/nouveau_crtc.h
index b7a18fbee6dc..366acb928f57 100644
--- a/drivers/gpu/drm/nouveau/nouveau_crtc.h
+++ b/drivers/gpu/drm/nouveau/nouveau_crtc.h
@@ -60,7 +60,6 @@ struct nouveau_crtc {
 	} cursor;
 
 	struct {
-		struct nouveau_bo *nvbo;
 		int depth;
 	} lut;
 
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
index ee5d1dc2eaf5..85c1f10bc2b6 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
@@ -56,6 +56,10 @@ MODULE_PARM_DESC(nofbaccel, "Disable fbcon acceleration");
 int nouveau_nofbaccel = 0;
 module_param_named(nofbaccel, nouveau_nofbaccel, int, 0400);
 
+MODULE_PARM_DESC(fbcon_bpp, "fbcon bits-per-pixel (default: auto)");
+static int nouveau_fbcon_bpp;
+module_param_named(fbcon_bpp, nouveau_fbcon_bpp, int, 0400);
+
 static void
 nouveau_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
 {
@@ -488,7 +492,7 @@ nouveau_fbcon_init(struct drm_device *dev)
 {
 	struct nouveau_drm *drm = nouveau_drm(dev);
 	struct nouveau_fbdev *fbcon;
-	int preferred_bpp;
+	int preferred_bpp = nouveau_fbcon_bpp;
 	int ret;
 
 	if (!dev->mode_config.num_crtc ||
@@ -512,13 +516,15 @@ nouveau_fbcon_init(struct drm_device *dev)
 	if (ret)
 		goto fini;
 
-	if (drm->client.device.info.ram_size <= 32 * 1024 * 1024)
-		preferred_bpp = 8;
-	else
-	if (drm->client.device.info.ram_size <= 64 * 1024 * 1024)
-		preferred_bpp = 16;
-	else
-		preferred_bpp = 32;
+	if (preferred_bpp != 8 && preferred_bpp != 16 && preferred_bpp != 32) {
+		if (drm->client.device.info.ram_size <= 32 * 1024 * 1024)
+			preferred_bpp = 8;
+		else
+		if (drm->client.device.info.ram_size <= 64 * 1024 * 1024)
+			preferred_bpp = 16;
+		else
+			preferred_bpp = 32;
+	}
 
 	/* disable all the possible outputs/crtcs before entering KMS mode */
 	if (!drm_drv_uses_atomic_modeset(dev))
diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c
index b22c37bde13f..dd8d4352ed99 100644
--- a/drivers/gpu/drm/nouveau/nv50_display.c
+++ b/drivers/gpu/drm/nouveau/nv50_display.c
@@ -137,8 +137,10 @@ struct nv50_head_atom {
 	} mode;
 
 	struct {
+		bool visible;
 		u32 handle;
 		u64 offset:40;
+		u8 mode:4;
 	} lut;
 
 	struct {
@@ -192,6 +194,7 @@ struct nv50_head_atom {
 
 	union {
 		struct {
+			bool ilut:1;
 			bool core:1;
 			bool curs:1;
 		};
@@ -200,6 +203,7 @@ struct nv50_head_atom {
 
 	union {
 		struct {
+			bool ilut:1;
 			bool core:1;
 			bool curs:1;
 			bool view:1;
@@ -660,6 +664,10 @@ nv50_ovly_create(struct nvif_device *device, struct nvif_object *disp,
 
 struct nv50_head {
 	struct nouveau_crtc base;
+	struct {
+		struct nouveau_bo *nvbo[2];
+		int next;
+	} lut;
 	struct nv50_ovly ovly;
 	struct nv50_oimm oimm;
 };
@@ -1795,6 +1803,54 @@ nv50_head_lut_clr(struct nv50_head *head)
 }
 
 static void
+nv50_head_lut_load(struct drm_property_blob *blob, int mode,
+		   struct nouveau_bo *nvbo)
+{
+	struct drm_color_lut *in = (struct drm_color_lut *)blob->data;
+	void __iomem *lut = (u8 *)nvbo_kmap_obj_iovirtual(nvbo);
+	const int size = blob->length / sizeof(*in);
+	int bits, shift, i;
+	u16 zero, r, g, b;
+
+	/* This can't happen.. But it shuts the compiler up. */
+	if (WARN_ON(size != 256))
+		return;
+
+	switch (mode) {
+	case 0: /* LORES. */
+	case 1: /* HIRES. */
+		bits = 11;
+		shift = 3;
+		zero = 0x0000;
+		break;
+	case 7: /* INTERPOLATE_257_UNITY_RANGE. */
+		bits = 14;
+		shift = 0;
+		zero = 0x6000;
+		break;
+	default:
+		WARN_ON(1);
+		return;
+	}
+
+	for (i = 0; i < size; i++) {
+		r = (drm_color_lut_extract(in[i].  red, bits) + zero) << shift;
+		g = (drm_color_lut_extract(in[i].green, bits) + zero) << shift;
+		b = (drm_color_lut_extract(in[i]. blue, bits) + zero) << shift;
+		writew(r, lut + (i * 0x08) + 0);
+		writew(g, lut + (i * 0x08) + 2);
+		writew(b, lut + (i * 0x08) + 4);
+	}
+
+	/* INTERPOLATE modes require a "next" entry to interpolate with,
+	 * so we replicate the last entry to deal with this for now.
+	 */
+	writew(r, lut + (i * 0x08) + 0);
+	writew(g, lut + (i * 0x08) + 2);
+	writew(b, lut + (i * 0x08) + 4);
+}
+
+static void
 nv50_head_lut_set(struct nv50_head *head, struct nv50_head_atom *asyh)
 {
 	struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->mast.base;
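Note: a quick worked example of the packing in nv50_head_lut_load() for the INTERPOLATE_257_UNITY_RANGE case (mode 7): drm_color_lut_extract() scales the 16-bit component down to 14 bits, so a full-scale 0xffff input becomes 0x3fff, and adding the 0x6000 bias with a zero shift gives 0x9fff in the LUT entry. As a self-contained sketch (the helper name is illustrative; the constants come from the function above):

static u16 pack_lut_mode7_sketch(u16 component)
{
	const int bits = 14, shift = 0;
	const u16 zero = 0x6000;

	/* drm_color_lut_extract(0xffff, 14) == 0x3fff -> returns 0x9fff */
	return (drm_color_lut_extract(component, bits) + zero) << shift;
}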
@@ -1802,18 +1858,18 @@ nv50_head_lut_set(struct nv50_head *head, struct nv50_head_atom *asyh)
 	if ((push = evo_wait(core, 7))) {
 		if (core->base.user.oclass < G82_DISP_CORE_CHANNEL_DMA) {
 			evo_mthd(push, 0x0840 + (head->base.index * 0x400), 2);
-			evo_data(push, 0xc0000000);
+			evo_data(push, 0x80000000 | asyh->lut.mode << 30);
 			evo_data(push, asyh->lut.offset >> 8);
 		} else
 		if (core->base.user.oclass < GF110_DISP_CORE_CHANNEL_DMA) {
 			evo_mthd(push, 0x0840 + (head->base.index * 0x400), 2);
-			evo_data(push, 0xc0000000);
+			evo_data(push, 0x80000000 | asyh->lut.mode << 30);
 			evo_data(push, asyh->lut.offset >> 8);
 			evo_mthd(push, 0x085c + (head->base.index * 0x400), 1);
 			evo_data(push, asyh->lut.handle);
 		} else {
 			evo_mthd(push, 0x0440 + (head->base.index * 0x300), 4);
-			evo_data(push, 0x83000000);
+			evo_data(push, 0x80000000 | asyh->lut.mode << 24);
 			evo_data(push, asyh->lut.offset >> 8);
 			evo_data(push, 0x00000000);
 			evo_data(push, 0x00000000);
@@ -1896,7 +1952,7 @@ nv50_head_view(struct nv50_head *head, struct nv50_head_atom *asyh)
 static void
 nv50_head_flush_clr(struct nv50_head *head, struct nv50_head_atom *asyh, bool y)
 {
-	if (asyh->clr.core && (!asyh->set.core || y))
+	if (asyh->clr.ilut && (!asyh->set.ilut || y))
 		nv50_head_lut_clr(head);
 	if (asyh->clr.core && (!asyh->set.core || y))
 		nv50_head_core_clr(head);
@@ -1909,7 +1965,15 @@ nv50_head_flush_set(struct nv50_head *head, struct nv50_head_atom *asyh)
 {
 	if (asyh->set.view   ) nv50_head_view    (head, asyh);
 	if (asyh->set.mode   ) nv50_head_mode    (head, asyh);
-	if (asyh->set.core   ) nv50_head_lut_set (head, asyh);
+	if (asyh->set.ilut   ) {
+		struct nouveau_bo *nvbo = head->lut.nvbo[head->lut.next];
+		struct drm_property_blob *blob = asyh->state.gamma_lut;
+		if (blob)
+			nv50_head_lut_load(blob, asyh->lut.mode, nvbo);
+		asyh->lut.offset = nvbo->bo.offset;
+		head->lut.next ^= 1;
+		nv50_head_lut_set(head, asyh);
+	}
 	if (asyh->set.core   ) nv50_head_core_set(head, asyh);
 	if (asyh->set.curs   ) nv50_head_curs_set(head, asyh);
 	if (asyh->set.base   ) nv50_head_base    (head, asyh);
@@ -2044,6 +2108,37 @@ nv50_head_atomic_check_view(struct nv50_head_atom *armh,
2044} 2108}
2045 2109
2046static void 2110static void
2111nv50_head_atomic_check_lut(struct nv50_head *head,
2112 struct nv50_head_atom *armh,
2113 struct nv50_head_atom *asyh)
2114{
2115 struct nv50_disp *disp = nv50_disp(head->base.base.dev);
2116
2117 /* An I8 surface without an input LUT makes no sense, and
2118 * EVO will throw an error if you try.
2119 *
2120 * Legacy clients actually cause this due to the order in
2121 * which they call ioctls, so we will enable the LUT with
2122 * whatever contents the buffer already contains to avoid
2123 * triggering the error check.
2124 */
2125 if (!asyh->state.gamma_lut && asyh->base.cpp != 1) {
2126 asyh->lut.handle = 0;
2127 asyh->clr.ilut = armh->lut.visible;
2128 return;
2129 }
2130
2131 if (disp->disp->oclass < GF110_DISP) {
2132 asyh->lut.mode = (asyh->base.cpp == 1) ? 0 : 1;
2133 asyh->set.ilut = true;
2134 } else {
2135 asyh->lut.mode = 7;
2136 asyh->set.ilut = asyh->state.color_mgmt_changed;
2137 }
2138 asyh->lut.handle = disp->mast.base.vram.handle;
2139}
2140
2141static void
2047nv50_head_atomic_check_mode(struct nv50_head *head, struct nv50_head_atom *asyh) 2142nv50_head_atomic_check_mode(struct nv50_head *head, struct nv50_head_atom *asyh)
2048{ 2143{
2049 struct drm_display_mode *mode = &asyh->state.adjusted_mode; 2144 struct drm_display_mode *mode = &asyh->state.adjusted_mode;
@@ -2128,6 +2223,11 @@ nv50_head_atomic_check(struct drm_crtc *crtc, struct drm_crtc_state *state)
2128 if (asyh->state.mode_changed) 2223 if (asyh->state.mode_changed)
2129 nv50_head_atomic_check_mode(head, asyh); 2224 nv50_head_atomic_check_mode(head, asyh);
2130 2225
2226 if (asyh->state.color_mgmt_changed ||
2227 asyh->base.cpp != armh->base.cpp)
2228 nv50_head_atomic_check_lut(head, armh, asyh);
2229 asyh->lut.visible = asyh->lut.handle != 0;
2230
2131 if (asyc) { 2231 if (asyc) {
2132 if (asyc->set.scaler) 2232 if (asyc->set.scaler)
2133 nv50_head_atomic_check_view(armh, asyh, asyc); 2233 nv50_head_atomic_check_view(armh, asyh, asyc);
@@ -2143,7 +2243,8 @@ nv50_head_atomic_check(struct drm_crtc *crtc, struct drm_crtc_state *state)
2143 asyh->core.w = asyh->base.w; 2243 asyh->core.w = asyh->base.w;
2144 asyh->core.h = asyh->base.h; 2244 asyh->core.h = asyh->base.h;
2145 } else 2245 } else
2146 if ((asyh->core.visible = asyh->curs.visible)) { 2246 if ((asyh->core.visible = asyh->curs.visible) ||
2247 (asyh->core.visible = asyh->lut.visible)) {
2147 /*XXX: We need to either find some way of having the 2248 /*XXX: We need to either find some way of having the
2148 * primary base layer appear black, while still 2249 * primary base layer appear black, while still
2149 * being able to display the other layers, or we 2250 * being able to display the other layers, or we
@@ -2161,11 +2262,10 @@ nv50_head_atomic_check(struct drm_crtc *crtc, struct drm_crtc_state *state)
2161 asyh->core.layout = 1; 2262 asyh->core.layout = 1;
2162 asyh->core.block = 0; 2263 asyh->core.block = 0;
2163 asyh->core.pitch = ALIGN(asyh->core.w, 64) * 4; 2264 asyh->core.pitch = ALIGN(asyh->core.w, 64) * 4;
2164 asyh->lut.handle = disp->mast.base.vram.handle;
2165 asyh->lut.offset = head->base.lut.nvbo->bo.offset;
2166 asyh->set.base = armh->base.cpp != asyh->base.cpp; 2265 asyh->set.base = armh->base.cpp != asyh->base.cpp;
2167 asyh->set.ovly = armh->ovly.cpp != asyh->ovly.cpp; 2266 asyh->set.ovly = armh->ovly.cpp != asyh->ovly.cpp;
2168 } else { 2267 } else {
2268 asyh->lut.visible = false;
2169 asyh->core.visible = false; 2269 asyh->core.visible = false;
2170 asyh->curs.visible = false; 2270 asyh->curs.visible = false;
2171 asyh->base.cpp = 0; 2271 asyh->base.cpp = 0;
@@ -2189,8 +2289,10 @@ nv50_head_atomic_check(struct drm_crtc *crtc, struct drm_crtc_state *state)
2189 asyh->clr.curs = true; 2289 asyh->clr.curs = true;
2190 } 2290 }
2191 } else { 2291 } else {
2292 asyh->clr.ilut = armh->lut.visible;
2192 asyh->clr.core = armh->core.visible; 2293 asyh->clr.core = armh->core.visible;
2193 asyh->clr.curs = armh->curs.visible; 2294 asyh->clr.curs = armh->curs.visible;
2295 asyh->set.ilut = asyh->lut.visible;
2194 asyh->set.core = asyh->core.visible; 2296 asyh->set.core = asyh->core.visible;
2195 asyh->set.curs = asyh->curs.visible; 2297 asyh->set.curs = asyh->curs.visible;
2196 } 2298 }
@@ -2200,47 +2302,11 @@ nv50_head_atomic_check(struct drm_crtc *crtc, struct drm_crtc_state *state)
2200 return 0; 2302 return 0;
2201} 2303}
2202 2304
2203static void
2204nv50_head_lut_load(struct drm_crtc *crtc)
2205{
2206 struct nv50_disp *disp = nv50_disp(crtc->dev);
2207 struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
2208 void __iomem *lut = nvbo_kmap_obj_iovirtual(nv_crtc->lut.nvbo);
2209 u16 *r, *g, *b;
2210 int i;
2211
2212 r = crtc->gamma_store;
2213 g = r + crtc->gamma_size;
2214 b = g + crtc->gamma_size;
2215
2216 for (i = 0; i < 256; i++) {
2217 if (disp->disp->oclass < GF110_DISP) {
2218 writew((*r++ >> 2) + 0x0000, lut + (i * 0x08) + 0);
2219 writew((*g++ >> 2) + 0x0000, lut + (i * 0x08) + 2);
2220 writew((*b++ >> 2) + 0x0000, lut + (i * 0x08) + 4);
2221 } else {
2222 /* 0x6000 interferes with the 14-bit color??? */
2223 writew((*r++ >> 2) + 0x6000, lut + (i * 0x20) + 0);
2224 writew((*g++ >> 2) + 0x6000, lut + (i * 0x20) + 2);
2225 writew((*b++ >> 2) + 0x6000, lut + (i * 0x20) + 4);
2226 }
2227 }
2228}
2229
2230static const struct drm_crtc_helper_funcs 2305static const struct drm_crtc_helper_funcs
2231nv50_head_help = { 2306nv50_head_help = {
2232 .atomic_check = nv50_head_atomic_check, 2307 .atomic_check = nv50_head_atomic_check,
2233}; 2308};
2234 2309
2235static int
2236nv50_head_gamma_set(struct drm_crtc *crtc, u16 *r, u16 *g, u16 *b,
2237 uint32_t size,
2238 struct drm_modeset_acquire_ctx *ctx)
2239{
2240 nv50_head_lut_load(crtc);
2241 return 0;
2242}
2243
2244static void 2310static void
2245nv50_head_atomic_destroy_state(struct drm_crtc *crtc, 2311nv50_head_atomic_destroy_state(struct drm_crtc *crtc,
2246 struct drm_crtc_state *state) 2312 struct drm_crtc_state *state)
@@ -2296,17 +2362,15 @@ nv50_head_reset(struct drm_crtc *crtc)
2296static void 2362static void
2297nv50_head_destroy(struct drm_crtc *crtc) 2363nv50_head_destroy(struct drm_crtc *crtc)
2298{ 2364{
2299 struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
2300 struct nv50_disp *disp = nv50_disp(crtc->dev); 2365 struct nv50_disp *disp = nv50_disp(crtc->dev);
2301 struct nv50_head *head = nv50_head(crtc); 2366 struct nv50_head *head = nv50_head(crtc);
2367 int i;
2302 2368
2303 nv50_dmac_destroy(&head->ovly.base, disp->disp); 2369 nv50_dmac_destroy(&head->ovly.base, disp->disp);
2304 nv50_pioc_destroy(&head->oimm.base); 2370 nv50_pioc_destroy(&head->oimm.base);
2305 2371
2306 nouveau_bo_unmap(nv_crtc->lut.nvbo); 2372 for (i = 0; i < ARRAY_SIZE(head->lut.nvbo); i++)
2307 if (nv_crtc->lut.nvbo) 2373 nouveau_bo_unmap_unpin_unref(&head->lut.nvbo[i]);
2308 nouveau_bo_unpin(nv_crtc->lut.nvbo);
2309 nouveau_bo_ref(NULL, &nv_crtc->lut.nvbo);
2310 2374
2311 drm_crtc_cleanup(crtc); 2375 drm_crtc_cleanup(crtc);
2312 kfree(crtc); 2376 kfree(crtc);
@@ -2315,7 +2379,7 @@ nv50_head_destroy(struct drm_crtc *crtc)
2315static const struct drm_crtc_funcs 2379static const struct drm_crtc_funcs
2316nv50_head_func = { 2380nv50_head_func = {
2317 .reset = nv50_head_reset, 2381 .reset = nv50_head_reset,
2318 .gamma_set = nv50_head_gamma_set, 2382 .gamma_set = drm_atomic_helper_legacy_gamma_set,
2319 .destroy = nv50_head_destroy, 2383 .destroy = nv50_head_destroy,
2320 .set_config = drm_atomic_helper_set_config, 2384 .set_config = drm_atomic_helper_set_config,
2321 .page_flip = drm_atomic_helper_page_flip, 2385 .page_flip = drm_atomic_helper_page_flip,
@@ -2333,7 +2397,7 @@ nv50_head_create(struct drm_device *dev, int index)
2333 struct nv50_base *base; 2397 struct nv50_base *base;
2334 struct nv50_curs *curs; 2398 struct nv50_curs *curs;
2335 struct drm_crtc *crtc; 2399 struct drm_crtc *crtc;
2336 int ret; 2400 int ret, i;
2337 2401
2338 head = kzalloc(sizeof(*head), GFP_KERNEL); 2402 head = kzalloc(sizeof(*head), GFP_KERNEL);
2339 if (!head) 2403 if (!head)
@@ -2355,22 +2419,14 @@ nv50_head_create(struct drm_device *dev, int index)
2355 drm_crtc_helper_add(crtc, &nv50_head_help); 2419 drm_crtc_helper_add(crtc, &nv50_head_help);
2356 drm_mode_crtc_set_gamma_size(crtc, 256); 2420 drm_mode_crtc_set_gamma_size(crtc, 256);
2357 2421
2358 ret = nouveau_bo_new(&drm->client, 8192, 0x100, TTM_PL_FLAG_VRAM, 2422 for (i = 0; i < ARRAY_SIZE(head->lut.nvbo); i++) {
2359 0, 0x0000, NULL, NULL, &head->base.lut.nvbo); 2423 ret = nouveau_bo_new_pin_map(&drm->client, 1025 * 8, 0x100,
2360 if (!ret) { 2424 TTM_PL_FLAG_VRAM,
2361 ret = nouveau_bo_pin(head->base.lut.nvbo, TTM_PL_FLAG_VRAM, true); 2425 &head->lut.nvbo[i]);
2362 if (!ret) {
2363 ret = nouveau_bo_map(head->base.lut.nvbo);
2364 if (ret)
2365 nouveau_bo_unpin(head->base.lut.nvbo);
2366 }
2367 if (ret) 2426 if (ret)
2368 nouveau_bo_ref(NULL, &head->base.lut.nvbo); 2427 goto out;
2369 } 2428 }
2370 2429
2371 if (ret)
2372 goto out;
2373
2374 /* allocate overlay resources */ 2430 /* allocate overlay resources */
2375 ret = nv50_oimm_create(device, disp->disp, index, &head->oimm); 2431 ret = nv50_oimm_create(device, disp->disp, index, &head->oimm);
2376 if (ret) 2432 if (ret)
@@ -4350,7 +4406,6 @@ nv50_display_init(struct drm_device *dev)
4350{ 4406{
4351 struct drm_encoder *encoder; 4407 struct drm_encoder *encoder;
4352 struct drm_plane *plane; 4408 struct drm_plane *plane;
4353 struct drm_crtc *crtc;
4354 u32 *push; 4409 u32 *push;
4355 4410
4356 push = evo_wait(nv50_mast(dev), 32); 4411 push = evo_wait(nv50_mast(dev), 32);
@@ -4369,10 +4424,6 @@ nv50_display_init(struct drm_device *dev)
4369 } 4424 }
4370 } 4425 }
4371 4426
4372 drm_for_each_crtc(crtc, dev) {
4373 nv50_head_lut_load(crtc);
4374 }
4375
4376 drm_for_each_plane(plane, dev) { 4427 drm_for_each_plane(plane, dev) {
4377 struct nv50_wndw *wndw = nv50_wndw(plane); 4428 struct nv50_wndw *wndw = nv50_wndw(plane);
4378 if (plane->funcs != &nv50_wndw) 4429 if (plane->funcs != &nv50_wndw)
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
index 08e77cd55e6e..05cd674326a6 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
@@ -28,6 +28,7 @@
28#include <core/option.h> 28#include <core/option.h>
29 29
30#include <subdev/bios.h> 30#include <subdev/bios.h>
31#include <subdev/therm.h>
31 32
32static DEFINE_MUTEX(nv_devices_mutex); 33static DEFINE_MUTEX(nv_devices_mutex);
33static LIST_HEAD(nv_devices); 34static LIST_HEAD(nv_devices);
@@ -1682,7 +1683,7 @@ nve4_chipset = {
1682 .mxm = nv50_mxm_new, 1683 .mxm = nv50_mxm_new,
1683 .pci = gk104_pci_new, 1684 .pci = gk104_pci_new,
1684 .pmu = gk104_pmu_new, 1685 .pmu = gk104_pmu_new,
1685 .therm = gf119_therm_new, 1686 .therm = gk104_therm_new,
1686 .timer = nv41_timer_new, 1687 .timer = nv41_timer_new,
1687 .top = gk104_top_new, 1688 .top = gk104_top_new,
1688 .volt = gk104_volt_new, 1689 .volt = gk104_volt_new,
@@ -1721,7 +1722,7 @@ nve6_chipset = {
1721 .mxm = nv50_mxm_new, 1722 .mxm = nv50_mxm_new,
1722 .pci = gk104_pci_new, 1723 .pci = gk104_pci_new,
1723 .pmu = gk104_pmu_new, 1724 .pmu = gk104_pmu_new,
1724 .therm = gf119_therm_new, 1725 .therm = gk104_therm_new,
1725 .timer = nv41_timer_new, 1726 .timer = nv41_timer_new,
1726 .top = gk104_top_new, 1727 .top = gk104_top_new,
1727 .volt = gk104_volt_new, 1728 .volt = gk104_volt_new,
@@ -1760,7 +1761,7 @@ nve7_chipset = {
1760 .mxm = nv50_mxm_new, 1761 .mxm = nv50_mxm_new,
1761 .pci = gk104_pci_new, 1762 .pci = gk104_pci_new,
1762 .pmu = gk104_pmu_new, 1763 .pmu = gk104_pmu_new,
1763 .therm = gf119_therm_new, 1764 .therm = gk104_therm_new,
1764 .timer = nv41_timer_new, 1765 .timer = nv41_timer_new,
1765 .top = gk104_top_new, 1766 .top = gk104_top_new,
1766 .volt = gk104_volt_new, 1767 .volt = gk104_volt_new,
@@ -1811,7 +1812,7 @@ nvf0_chipset = {
1811 .bus = gf100_bus_new, 1812 .bus = gf100_bus_new,
1812 .clk = gk104_clk_new, 1813 .clk = gk104_clk_new,
1813 .devinit = gf100_devinit_new, 1814 .devinit = gf100_devinit_new,
1814 .fb = gk104_fb_new, 1815 .fb = gk110_fb_new,
1815 .fuse = gf100_fuse_new, 1816 .fuse = gf100_fuse_new,
1816 .gpio = gk104_gpio_new, 1817 .gpio = gk104_gpio_new,
1817 .i2c = gk104_i2c_new, 1818 .i2c = gk104_i2c_new,
@@ -1824,7 +1825,7 @@ nvf0_chipset = {
1824 .mxm = nv50_mxm_new, 1825 .mxm = nv50_mxm_new,
1825 .pci = gk104_pci_new, 1826 .pci = gk104_pci_new,
1826 .pmu = gk110_pmu_new, 1827 .pmu = gk110_pmu_new,
1827 .therm = gf119_therm_new, 1828 .therm = gk104_therm_new,
1828 .timer = nv41_timer_new, 1829 .timer = nv41_timer_new,
1829 .top = gk104_top_new, 1830 .top = gk104_top_new,
1830 .volt = gk104_volt_new, 1831 .volt = gk104_volt_new,
@@ -1849,7 +1850,7 @@ nvf1_chipset = {
1849 .bus = gf100_bus_new, 1850 .bus = gf100_bus_new,
1850 .clk = gk104_clk_new, 1851 .clk = gk104_clk_new,
1851 .devinit = gf100_devinit_new, 1852 .devinit = gf100_devinit_new,
1852 .fb = gk104_fb_new, 1853 .fb = gk110_fb_new,
1853 .fuse = gf100_fuse_new, 1854 .fuse = gf100_fuse_new,
1854 .gpio = gk104_gpio_new, 1855 .gpio = gk104_gpio_new,
1855 .i2c = gk104_i2c_new, 1856 .i2c = gk104_i2c_new,
@@ -1862,7 +1863,7 @@ nvf1_chipset = {
1862 .mxm = nv50_mxm_new, 1863 .mxm = nv50_mxm_new,
1863 .pci = gk104_pci_new, 1864 .pci = gk104_pci_new,
1864 .pmu = gk110_pmu_new, 1865 .pmu = gk110_pmu_new,
1865 .therm = gf119_therm_new, 1866 .therm = gk104_therm_new,
1866 .timer = nv41_timer_new, 1867 .timer = nv41_timer_new,
1867 .top = gk104_top_new, 1868 .top = gk104_top_new,
1868 .volt = gk104_volt_new, 1869 .volt = gk104_volt_new,
@@ -1887,7 +1888,7 @@ nv106_chipset = {
1887 .bus = gf100_bus_new, 1888 .bus = gf100_bus_new,
1888 .clk = gk104_clk_new, 1889 .clk = gk104_clk_new,
1889 .devinit = gf100_devinit_new, 1890 .devinit = gf100_devinit_new,
1890 .fb = gk104_fb_new, 1891 .fb = gk110_fb_new,
1891 .fuse = gf100_fuse_new, 1892 .fuse = gf100_fuse_new,
1892 .gpio = gk104_gpio_new, 1893 .gpio = gk104_gpio_new,
1893 .i2c = gk104_i2c_new, 1894 .i2c = gk104_i2c_new,
@@ -1900,7 +1901,7 @@ nv106_chipset = {
1900 .mxm = nv50_mxm_new, 1901 .mxm = nv50_mxm_new,
1901 .pci = gk104_pci_new, 1902 .pci = gk104_pci_new,
1902 .pmu = gk208_pmu_new, 1903 .pmu = gk208_pmu_new,
1903 .therm = gf119_therm_new, 1904 .therm = gk104_therm_new,
1904 .timer = nv41_timer_new, 1905 .timer = nv41_timer_new,
1905 .top = gk104_top_new, 1906 .top = gk104_top_new,
1906 .volt = gk104_volt_new, 1907 .volt = gk104_volt_new,
@@ -1925,7 +1926,7 @@ nv108_chipset = {
1925 .bus = gf100_bus_new, 1926 .bus = gf100_bus_new,
1926 .clk = gk104_clk_new, 1927 .clk = gk104_clk_new,
1927 .devinit = gf100_devinit_new, 1928 .devinit = gf100_devinit_new,
1928 .fb = gk104_fb_new, 1929 .fb = gk110_fb_new,
1929 .fuse = gf100_fuse_new, 1930 .fuse = gf100_fuse_new,
1930 .gpio = gk104_gpio_new, 1931 .gpio = gk104_gpio_new,
1931 .i2c = gk104_i2c_new, 1932 .i2c = gk104_i2c_new,
@@ -1938,7 +1939,7 @@ nv108_chipset = {
1938 .mxm = nv50_mxm_new, 1939 .mxm = nv50_mxm_new,
1939 .pci = gk104_pci_new, 1940 .pci = gk104_pci_new,
1940 .pmu = gk208_pmu_new, 1941 .pmu = gk208_pmu_new,
1941 .therm = gf119_therm_new, 1942 .therm = gk104_therm_new,
1942 .timer = nv41_timer_new, 1943 .timer = nv41_timer_new,
1943 .top = gk104_top_new, 1944 .top = gk104_top_new,
1944 .volt = gk104_volt_new, 1945 .volt = gk104_volt_new,
@@ -2345,6 +2346,7 @@ nv138_chipset = {
2345 .mc = gp100_mc_new, 2346 .mc = gp100_mc_new,
2346 .mmu = gp100_mmu_new, 2347 .mmu = gp100_mmu_new,
2347 .therm = gp100_therm_new, 2348 .therm = gp100_therm_new,
2349 .secboot = gp108_secboot_new,
2348 .pci = gp100_pci_new, 2350 .pci = gp100_pci_new,
2349 .pmu = gp102_pmu_new, 2351 .pmu = gp102_pmu_new,
2350 .timer = gk20a_timer_new, 2352 .timer = gk20a_timer_new,
@@ -2356,6 +2358,10 @@ nv138_chipset = {
2356 .disp = gp102_disp_new, 2358 .disp = gp102_disp_new,
2357 .dma = gf119_dma_new, 2359 .dma = gf119_dma_new,
2358 .fifo = gp100_fifo_new, 2360 .fifo = gp100_fifo_new,
2361 .gr = gp107_gr_new,
2362 .nvdec = gp102_nvdec_new,
2363 .sec2 = gp102_sec2_new,
2364 .sw = gf100_sw_new,
2359}; 2365};
2360 2366
2361static const struct nvkm_device_chip 2367static const struct nvkm_device_chip
@@ -2508,6 +2514,7 @@ nvkm_device_fini(struct nvkm_device *device, bool suspend)
2508 } 2514 }
2509 } 2515 }
2510 2516
2517 nvkm_therm_clkgate_fini(device->therm, suspend);
2511 2518
2512 if (device->func->fini) 2519 if (device->func->fini)
2513 device->func->fini(device, suspend); 2520 device->func->fini(device, suspend);
@@ -2597,6 +2604,7 @@ nvkm_device_init(struct nvkm_device *device)
2597 } 2604 }
2598 2605
2599 nvkm_acpi_init(device); 2606 nvkm_acpi_init(device);
2607 nvkm_therm_clkgate_enable(device->therm);
2600 2608
2601 time = ktime_to_us(ktime_get()) - time; 2609 time = ktime_to_us(ktime_get()) - time;
2602 nvdev_trace(device, "init completed in %lldus\n", time); 2610 nvdev_trace(device, "init completed in %lldus\n", time);
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h
index d7c2adb9b543..c8ec3fd97155 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h
@@ -137,6 +137,7 @@ struct gf100_gr_func {
137 int (*rops)(struct gf100_gr *); 137 int (*rops)(struct gf100_gr *);
138 int ppc_nr; 138 int ppc_nr;
139 const struct gf100_grctx_func *grctx; 139 const struct gf100_grctx_func *grctx;
140 const struct nvkm_therm_clkgate_pack *clkgate_pack;
140 struct nvkm_sclass sclass[]; 141 struct nvkm_sclass sclass[];
141}; 142};
142 143
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk104.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk104.c
index 5e82f94c2245..1b52fcb2c49a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk104.c
@@ -22,6 +22,7 @@
22 * Authors: Ben Skeggs <bskeggs@redhat.com> 22 * Authors: Ben Skeggs <bskeggs@redhat.com>
23 */ 23 */
24#include "gf100.h" 24#include "gf100.h"
25#include "gk104.h"
25#include "ctxgf100.h" 26#include "ctxgf100.h"
26 27
27#include <nvif/class.h> 28#include <nvif/class.h>
@@ -173,6 +174,208 @@ gk104_gr_pack_mmio[] = {
173 {} 174 {}
174}; 175};
175 176
177const struct nvkm_therm_clkgate_init
178gk104_clkgate_blcg_init_main_0[] = {
179 { 0x4041f0, 1, 0x00004046 },
180 { 0x409890, 1, 0x00000045 },
181 { 0x4098b0, 1, 0x0000007f },
182 {}
183};
184
185const struct nvkm_therm_clkgate_init
186gk104_clkgate_blcg_init_rstr2d_0[] = {
187 { 0x4078c0, 1, 0x00000042 },
188 {}
189};
190
191const struct nvkm_therm_clkgate_init
192gk104_clkgate_blcg_init_unk_0[] = {
193 { 0x406000, 1, 0x00004044 },
194 { 0x405860, 1, 0x00004042 },
195 { 0x40590c, 1, 0x00004042 },
196 {}
197};
198
199const struct nvkm_therm_clkgate_init
200gk104_clkgate_blcg_init_gcc_0[] = {
201 { 0x408040, 1, 0x00004044 },
202 {}
203};
204
205const struct nvkm_therm_clkgate_init
206gk104_clkgate_blcg_init_sked_0[] = {
207 { 0x407000, 1, 0x00004044 },
208 {}
209};
210
211const struct nvkm_therm_clkgate_init
212gk104_clkgate_blcg_init_unk_1[] = {
213 { 0x405bf0, 1, 0x00004044 },
214 {}
215};
216
217const struct nvkm_therm_clkgate_init
218gk104_clkgate_blcg_init_gpc_ctxctl_0[] = {
219 { 0x41a890, 1, 0x00000042 },
220 { 0x41a8b0, 1, 0x0000007f },
221 {}
222};
223
224const struct nvkm_therm_clkgate_init
225gk104_clkgate_blcg_init_gpc_unk_0[] = {
226 { 0x418500, 1, 0x00004042 },
227 { 0x418608, 1, 0x00004042 },
228 { 0x418688, 1, 0x00004042 },
229 { 0x418718, 1, 0x00000042 },
230 {}
231};
232
233const struct nvkm_therm_clkgate_init
234gk104_clkgate_blcg_init_gpc_esetup_0[] = {
235 { 0x418828, 1, 0x00000044 },
236 {}
237};
238
239const struct nvkm_therm_clkgate_init
240gk104_clkgate_blcg_init_gpc_tpbus_0[] = {
241 { 0x418bbc, 1, 0x00004042 },
242 {}
243};
244
245const struct nvkm_therm_clkgate_init
246gk104_clkgate_blcg_init_gpc_zcull_0[] = {
247 { 0x418970, 1, 0x00004042 },
248 {}
249};
250
251const struct nvkm_therm_clkgate_init
252gk104_clkgate_blcg_init_gpc_tpconf_0[] = {
253 { 0x418c70, 1, 0x00004042 },
254 {}
255};
256
257const struct nvkm_therm_clkgate_init
258gk104_clkgate_blcg_init_gpc_unk_1[] = {
259 { 0x418cf0, 1, 0x00004042 },
260 { 0x418d70, 1, 0x00004042 },
261 { 0x418f0c, 1, 0x00004042 },
262 { 0x418e0c, 1, 0x00004042 },
263 {}
264};
265
266const struct nvkm_therm_clkgate_init
267gk104_clkgate_blcg_init_gpc_gcc_0[] = {
268 { 0x419020, 1, 0x00004042 },
269 { 0x419038, 1, 0x00000042 },
270 {}
271};
272
273const struct nvkm_therm_clkgate_init
274gk104_clkgate_blcg_init_gpc_ffb_0[] = {
275 { 0x418898, 1, 0x00000042 },
276 {}
277};
278
279const struct nvkm_therm_clkgate_init
280gk104_clkgate_blcg_init_gpc_tex_0[] = {
281 { 0x419a40, 9, 0x00004042 },
282 { 0x419acc, 1, 0x00004047 },
283 {}
284};
285
286const struct nvkm_therm_clkgate_init
287gk104_clkgate_blcg_init_gpc_poly_0[] = {
288 { 0x419868, 1, 0x00000042 },
289 {}
290};
291
292const struct nvkm_therm_clkgate_init
293gk104_clkgate_blcg_init_gpc_l1c_0[] = {
294 { 0x419ccc, 3, 0x00000042 },
295 {}
296};
297
298const struct nvkm_therm_clkgate_init
299gk104_clkgate_blcg_init_gpc_unk_2[] = {
300 { 0x419c70, 1, 0x00004045 },
301 {}
302};
303
304const struct nvkm_therm_clkgate_init
305gk104_clkgate_blcg_init_gpc_mp_0[] = {
306 { 0x419fd0, 1, 0x00004043 },
307 { 0x419fd8, 1, 0x00004049 },
308 { 0x419fe0, 2, 0x00004042 },
309 { 0x419ff0, 1, 0x00004046 },
310 { 0x419ff8, 1, 0x00004042 },
311 {}
312};
313
314const struct nvkm_therm_clkgate_init
315gk104_clkgate_blcg_init_gpc_ppc_0[] = {
316 { 0x41be28, 1, 0x00000042 },
317 { 0x41bfe8, 1, 0x00004042 },
318 { 0x41bed0, 1, 0x00004042 },
319 {}
320};
321
322const struct nvkm_therm_clkgate_init
323gk104_clkgate_blcg_init_rop_zrop_0[] = {
324 { 0x408810, 2, 0x00004042 },
325 {}
326};
327
328const struct nvkm_therm_clkgate_init
329gk104_clkgate_blcg_init_rop_0[] = {
330 { 0x408a80, 6, 0x00004042 },
331 {}
332};
333
334const struct nvkm_therm_clkgate_init
335gk104_clkgate_blcg_init_rop_crop_0[] = {
336 { 0x4089a8, 1, 0x00004042 },
337 { 0x4089b0, 1, 0x00000042 },
338 { 0x4089b8, 1, 0x00004042 },
339 {}
340};
341
342const struct nvkm_therm_clkgate_init
343gk104_clkgate_blcg_init_pxbar_0[] = {
344 { 0x13c820, 1, 0x0001007f },
345 { 0x13cbe0, 1, 0x00000042 },
346 {}
347};
348
349static const struct nvkm_therm_clkgate_pack
350gk104_clkgate_pack[] = {
351 { gk104_clkgate_blcg_init_main_0 },
352 { gk104_clkgate_blcg_init_rstr2d_0 },
353 { gk104_clkgate_blcg_init_unk_0 },
354 { gk104_clkgate_blcg_init_gcc_0 },
355 { gk104_clkgate_blcg_init_sked_0 },
356 { gk104_clkgate_blcg_init_unk_1 },
357 { gk104_clkgate_blcg_init_gpc_ctxctl_0 },
358 { gk104_clkgate_blcg_init_gpc_unk_0 },
359 { gk104_clkgate_blcg_init_gpc_esetup_0 },
360 { gk104_clkgate_blcg_init_gpc_tpbus_0 },
361 { gk104_clkgate_blcg_init_gpc_zcull_0 },
362 { gk104_clkgate_blcg_init_gpc_tpconf_0 },
363 { gk104_clkgate_blcg_init_gpc_unk_1 },
364 { gk104_clkgate_blcg_init_gpc_gcc_0 },
365 { gk104_clkgate_blcg_init_gpc_ffb_0 },
366 { gk104_clkgate_blcg_init_gpc_tex_0 },
367 { gk104_clkgate_blcg_init_gpc_poly_0 },
368 { gk104_clkgate_blcg_init_gpc_l1c_0 },
369 { gk104_clkgate_blcg_init_gpc_unk_2 },
370 { gk104_clkgate_blcg_init_gpc_mp_0 },
371 { gk104_clkgate_blcg_init_gpc_ppc_0 },
372 { gk104_clkgate_blcg_init_rop_zrop_0 },
373 { gk104_clkgate_blcg_init_rop_0 },
374 { gk104_clkgate_blcg_init_rop_crop_0 },
375 { gk104_clkgate_blcg_init_pxbar_0 },
376 {}
377};
378
176/******************************************************************************* 379/*******************************************************************************
177 * PGRAPH engine/subdev functions 380 * PGRAPH engine/subdev functions
178 ******************************************************************************/ 381 ******************************************************************************/
@@ -214,6 +417,9 @@ gk104_gr_init(struct gf100_gr *gr)
214 gr->func->init_gpc_mmu(gr); 417 gr->func->init_gpc_mmu(gr);
215 418
216 gf100_gr_mmio(gr, gr->func->mmio); 419 gf100_gr_mmio(gr, gr->func->mmio);
420 if (gr->func->clkgate_pack)
421 nvkm_therm_clkgate_init(gr->base.engine.subdev.device->therm,
422 gr->func->clkgate_pack);
217 423
218 nvkm_wr32(device, GPC_UNIT(0, 0x3018), 0x00000001); 424 nvkm_wr32(device, GPC_UNIT(0, 0x3018), 0x00000001);
219 425
@@ -338,6 +544,7 @@ gk104_gr = {
338 .rops = gf100_gr_rops, 544 .rops = gf100_gr_rops,
339 .ppc_nr = 1, 545 .ppc_nr = 1,
340 .grctx = &gk104_grctx, 546 .grctx = &gk104_grctx,
547 .clkgate_pack = gk104_clkgate_pack,
341 .sclass = { 548 .sclass = {
342 { -1, -1, FERMI_TWOD_A }, 549 { -1, -1, FERMI_TWOD_A },
343 { -1, -1, KEPLER_INLINE_TO_MEMORY_A }, 550 { -1, -1, KEPLER_INLINE_TO_MEMORY_A },
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk104.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk104.h
new file mode 100644
index 000000000000..a24c177365d1
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk104.h
@@ -0,0 +1,55 @@
1/*
2 * Copyright 2018 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Lyude Paul <lyude@redhat.com>
23 */
24#ifndef __GK104_GR_H__
25#define __GK104_GR_H__
26
27#include <subdev/therm.h>
28
29extern const struct nvkm_therm_clkgate_init gk104_clkgate_blcg_init_main_0[];
30extern const struct nvkm_therm_clkgate_init gk104_clkgate_blcg_init_rstr2d_0[];
31extern const struct nvkm_therm_clkgate_init gk104_clkgate_blcg_init_unk_0[];
32extern const struct nvkm_therm_clkgate_init gk104_clkgate_blcg_init_gcc_0[];
33extern const struct nvkm_therm_clkgate_init gk104_clkgate_blcg_init_sked_0[];
34extern const struct nvkm_therm_clkgate_init gk104_clkgate_blcg_init_unk_1[];
35extern const struct nvkm_therm_clkgate_init gk104_clkgate_blcg_init_gpc_ctxctl_0[];
36extern const struct nvkm_therm_clkgate_init gk104_clkgate_blcg_init_gpc_unk_0[];
37extern const struct nvkm_therm_clkgate_init gk104_clkgate_blcg_init_gpc_esetup_0[];
38extern const struct nvkm_therm_clkgate_init gk104_clkgate_blcg_init_gpc_tpbus_0[];
39extern const struct nvkm_therm_clkgate_init gk104_clkgate_blcg_init_gpc_zcull_0[];
40extern const struct nvkm_therm_clkgate_init gk104_clkgate_blcg_init_gpc_tpconf_0[];
41extern const struct nvkm_therm_clkgate_init gk104_clkgate_blcg_init_gpc_unk_1[];
42extern const struct nvkm_therm_clkgate_init gk104_clkgate_blcg_init_gpc_gcc_0[];
43extern const struct nvkm_therm_clkgate_init gk104_clkgate_blcg_init_gpc_ffb_0[];
44extern const struct nvkm_therm_clkgate_init gk104_clkgate_blcg_init_gpc_tex_0[];
45extern const struct nvkm_therm_clkgate_init gk104_clkgate_blcg_init_gpc_poly_0[];
46extern const struct nvkm_therm_clkgate_init gk104_clkgate_blcg_init_gpc_l1c_0[];
47extern const struct nvkm_therm_clkgate_init gk104_clkgate_blcg_init_gpc_unk_2[];
48extern const struct nvkm_therm_clkgate_init gk104_clkgate_blcg_init_gpc_mp_0[];
49extern const struct nvkm_therm_clkgate_init gk104_clkgate_blcg_init_gpc_ppc_0[];
50extern const struct nvkm_therm_clkgate_init gk104_clkgate_blcg_init_rop_zrop_0[];
51extern const struct nvkm_therm_clkgate_init gk104_clkgate_blcg_init_rop_0[];
52extern const struct nvkm_therm_clkgate_init gk104_clkgate_blcg_init_rop_crop_0[];
53extern const struct nvkm_therm_clkgate_init gk104_clkgate_blcg_init_pxbar_0[];
54
55#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110.c
index a38e19b61c1d..4da916a9fc73 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110.c
@@ -22,6 +22,7 @@
22 * Authors: Ben Skeggs <bskeggs@redhat.com> 22 * Authors: Ben Skeggs <bskeggs@redhat.com>
23 */ 23 */
24#include "gf100.h" 24#include "gf100.h"
25#include "gk104.h"
25#include "ctxgf100.h" 26#include "ctxgf100.h"
26 27
27#include <subdev/timer.h> 28#include <subdev/timer.h>
@@ -156,6 +157,159 @@ gk110_gr_pack_mmio[] = {
156 {} 157 {}
157}; 158};
158 159
160static const struct nvkm_therm_clkgate_init
161gk110_clkgate_blcg_init_sked_0[] = {
162 { 0x407000, 1, 0x00004041 },
163 {}
164};
165
166static const struct nvkm_therm_clkgate_init
167gk110_clkgate_blcg_init_gpc_gcc_0[] = {
168 { 0x419020, 1, 0x00000042 },
169 { 0x419038, 1, 0x00000042 },
170 {}
171};
172
173static const struct nvkm_therm_clkgate_init
174gk110_clkgate_blcg_init_gpc_l1c_0[] = {
175 { 0x419cd4, 2, 0x00004042 },
176 {}
177};
178
179static const struct nvkm_therm_clkgate_init
180gk110_clkgate_blcg_init_gpc_mp_0[] = {
181 { 0x419fd0, 1, 0x00004043 },
182 { 0x419fd8, 1, 0x00004049 },
183 { 0x419fe0, 2, 0x00004042 },
184 { 0x419ff0, 1, 0x00000046 },
185 { 0x419ff8, 1, 0x00004042 },
186 { 0x419f90, 1, 0x00004042 },
187 {}
188};
189
190static const struct nvkm_therm_clkgate_init
191gk110_clkgate_slcg_init_main_0[] = {
192 { 0x4041f4, 1, 0x00000000 },
193 { 0x409894, 1, 0x00000000 },
194 {}
195};
196
197static const struct nvkm_therm_clkgate_init
198gk110_clkgate_slcg_init_unk_0[] = {
199 { 0x406004, 1, 0x00000000 },
200 {}
201};
202
203static const struct nvkm_therm_clkgate_init
204gk110_clkgate_slcg_init_sked_0[] = {
205 { 0x407004, 1, 0x00000000 },
206 {}
207};
208
209static const struct nvkm_therm_clkgate_init
210gk110_clkgate_slcg_init_gpc_ctxctl_0[] = {
211 { 0x41a894, 1, 0x00000000 },
212 {}
213};
214
215static const struct nvkm_therm_clkgate_init
216gk110_clkgate_slcg_init_gpc_unk_0[] = {
217 { 0x418504, 1, 0x00000000 },
218 { 0x41860c, 1, 0x00000000 },
219 { 0x41868c, 1, 0x00000000 },
220 {}
221};
222
223static const struct nvkm_therm_clkgate_init
224gk110_clkgate_slcg_init_gpc_esetup_0[] = {
225 { 0x41882c, 1, 0x00000000 },
226 {}
227};
228
229static const struct nvkm_therm_clkgate_init
230gk110_clkgate_slcg_init_gpc_zcull_0[] = {
231 { 0x418974, 1, 0x00000000 },
232 {}
233};
234
235static const struct nvkm_therm_clkgate_init
236gk110_clkgate_slcg_init_gpc_l1c_0[] = {
237 { 0x419cd8, 2, 0x00000000 },
238 {}
239};
240
241static const struct nvkm_therm_clkgate_init
242gk110_clkgate_slcg_init_gpc_unk_1[] = {
243 { 0x419c74, 1, 0x00000000 },
244 {}
245};
246
247static const struct nvkm_therm_clkgate_init
248gk110_clkgate_slcg_init_gpc_mp_0[] = {
249 { 0x419fd4, 1, 0x00004a4a },
250 { 0x419fdc, 1, 0x00000014 },
251 { 0x419fe4, 1, 0x00000000 },
252 { 0x419ff4, 1, 0x00001724 },
253 {}
254};
255
256static const struct nvkm_therm_clkgate_init
257gk110_clkgate_slcg_init_gpc_ppc_0[] = {
258 { 0x41be2c, 1, 0x00000000 },
259 {}
260};
261
262static const struct nvkm_therm_clkgate_init
263gk110_clkgate_slcg_init_pcounter_0[] = {
264 { 0x1be018, 1, 0x000001ff },
265 { 0x1bc018, 1, 0x000001ff },
266 { 0x1b8018, 1, 0x000001ff },
267 { 0x1b4124, 1, 0x00000000 },
268 {}
269};
270
271static const struct nvkm_therm_clkgate_pack
272gk110_clkgate_pack[] = {
273 { gk104_clkgate_blcg_init_main_0 },
274 { gk104_clkgate_blcg_init_rstr2d_0 },
275 { gk104_clkgate_blcg_init_unk_0 },
276 { gk104_clkgate_blcg_init_gcc_0 },
277 { gk110_clkgate_blcg_init_sked_0 },
278 { gk104_clkgate_blcg_init_unk_1 },
279 { gk104_clkgate_blcg_init_gpc_ctxctl_0 },
280 { gk104_clkgate_blcg_init_gpc_unk_0 },
281 { gk104_clkgate_blcg_init_gpc_esetup_0 },
282 { gk104_clkgate_blcg_init_gpc_tpbus_0 },
283 { gk104_clkgate_blcg_init_gpc_zcull_0 },
284 { gk104_clkgate_blcg_init_gpc_tpconf_0 },
285 { gk104_clkgate_blcg_init_gpc_unk_1 },
286 { gk110_clkgate_blcg_init_gpc_gcc_0 },
287 { gk104_clkgate_blcg_init_gpc_ffb_0 },
288 { gk104_clkgate_blcg_init_gpc_tex_0 },
289 { gk104_clkgate_blcg_init_gpc_poly_0 },
290 { gk110_clkgate_blcg_init_gpc_l1c_0 },
291 { gk104_clkgate_blcg_init_gpc_unk_2 },
292 { gk110_clkgate_blcg_init_gpc_mp_0 },
293 { gk104_clkgate_blcg_init_gpc_ppc_0 },
294 { gk104_clkgate_blcg_init_rop_zrop_0 },
295 { gk104_clkgate_blcg_init_rop_0 },
296 { gk104_clkgate_blcg_init_rop_crop_0 },
297 { gk104_clkgate_blcg_init_pxbar_0 },
298 { gk110_clkgate_slcg_init_main_0 },
299 { gk110_clkgate_slcg_init_unk_0 },
300 { gk110_clkgate_slcg_init_sked_0 },
301 { gk110_clkgate_slcg_init_gpc_ctxctl_0 },
302 { gk110_clkgate_slcg_init_gpc_unk_0 },
303 { gk110_clkgate_slcg_init_gpc_esetup_0 },
304 { gk110_clkgate_slcg_init_gpc_zcull_0 },
305 { gk110_clkgate_slcg_init_gpc_l1c_0 },
306 { gk110_clkgate_slcg_init_gpc_unk_1 },
307 { gk110_clkgate_slcg_init_gpc_mp_0 },
308 { gk110_clkgate_slcg_init_gpc_ppc_0 },
309 { gk110_clkgate_slcg_init_pcounter_0 },
310 {}
311};
312
159/******************************************************************************* 313/*******************************************************************************
160 * PGRAPH engine/subdev functions 314 * PGRAPH engine/subdev functions
161 ******************************************************************************/ 315 ******************************************************************************/
@@ -192,6 +346,7 @@ gk110_gr = {
192 .rops = gf100_gr_rops, 346 .rops = gf100_gr_rops,
193 .ppc_nr = 2, 347 .ppc_nr = 2,
194 .grctx = &gk110_grctx, 348 .grctx = &gk110_grctx,
349 .clkgate_pack = gk110_clkgate_pack,
195 .sclass = { 350 .sclass = {
196 { -1, -1, FERMI_TWOD_A }, 351 { -1, -1, FERMI_TWOD_A },
197 { -1, -1, KEPLER_INLINE_TO_MEMORY_B }, 352 { -1, -1, KEPLER_INLINE_TO_MEMORY_B },
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/pm/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/pm/base.c
index dde89a4a0f5b..53859b6254d6 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/pm/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/pm/base.c
@@ -462,7 +462,7 @@ nvkm_perfmon_mthd_query_domain(struct nvkm_perfmon *perfmon,
462 462
463 args->v0.id = di; 463 args->v0.id = di;
464 args->v0.signal_nr = nvkm_perfdom_count_perfsig(dom); 464 args->v0.signal_nr = nvkm_perfdom_count_perfsig(dom);
465 strncpy(args->v0.name, dom->name, sizeof(args->v0.name)); 465 strncpy(args->v0.name, dom->name, sizeof(args->v0.name) - 1);
466 466
467 /* Currently only global counters (PCOUNTER) are implemented 467 /* Currently only global counters (PCOUNTER) are implemented
468 * but this will be different for local counters (MP). */ 468 * but this will be different for local counters (MP). */
@@ -514,7 +514,7 @@ nvkm_perfmon_mthd_query_signal(struct nvkm_perfmon *perfmon,
514 "/%s/%02x", dom->name, si); 514 "/%s/%02x", dom->name, si);
515 } else { 515 } else {
516 strncpy(args->v0.name, sig->name, 516 strncpy(args->v0.name, sig->name,
517 sizeof(args->v0.name)); 517 sizeof(args->v0.name) - 1);
518 } 518 }
519 519
520 args->v0.signal = si; 520 args->v0.signal = si;
@@ -572,7 +572,7 @@ nvkm_perfmon_mthd_query_source(struct nvkm_perfmon *perfmon,
572 572
573 args->v0.source = sig->source[si]; 573 args->v0.source = sig->source[si];
574 args->v0.mask = src->mask; 574 args->v0.mask = src->mask;
575 strncpy(args->v0.name, src->name, sizeof(args->v0.name)); 575 strncpy(args->v0.name, src->name, sizeof(args->v0.name) - 1);
576 } 576 }
577 577
578 if (++si < source_nr) { 578 if (++si < source_nr) {
diff --git a/drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue.c b/drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue.c
index 77273b53672c..58a59b7db2e5 100644
--- a/drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue.c
+++ b/drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue.c
@@ -505,6 +505,7 @@ nvkm_msgqueue_new(u32 version, struct nvkm_falcon *falcon,
505 ret = msgqueue_0137bca5_new(falcon, sb, queue); 505 ret = msgqueue_0137bca5_new(falcon, sb, queue);
506 break; 506 break;
507 case 0x0148cdec: 507 case 0x0148cdec:
508 case 0x015ccf3e:
508 ret = msgqueue_0148cdec_new(falcon, sb, queue); 509 ret = msgqueue_0148cdec_new(falcon, sb, queue);
509 break; 510 break;
510 default: 511 default:
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gt215.c b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gt215.c
index 96e0941c8edd..f0a26881d9b9 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gt215.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gt215.c
@@ -110,6 +110,7 @@ read_pll(struct gt215_clk *clk, int idx, u32 pll)
110 struct nvkm_device *device = clk->base.subdev.device; 110 struct nvkm_device *device = clk->base.subdev.device;
111 u32 ctrl = nvkm_rd32(device, pll + 0); 111 u32 ctrl = nvkm_rd32(device, pll + 0);
112 u32 sclk = 0, P = 1, N = 1, M = 1; 112 u32 sclk = 0, P = 1, N = 1, M = 1;
113 u32 MP;
113 114
114 if (!(ctrl & 0x00000008)) { 115 if (!(ctrl & 0x00000008)) {
115 if (ctrl & 0x00000001) { 116 if (ctrl & 0x00000001) {
@@ -130,10 +131,12 @@ read_pll(struct gt215_clk *clk, int idx, u32 pll)
130 sclk = read_clk(clk, 0x10 + idx, false); 131 sclk = read_clk(clk, 0x10 + idx, false);
131 } 132 }
132 133
133 if (M * P) 134 MP = M * P;
134 return sclk * N / (M * P);
135 135
136 return 0; 136 if (!MP)
137 return 0;
138
139 return sclk * N / MP;
137} 140}
138 141
139static int 142static int
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/Kbuild
index 2571530e82f1..b4f22cce5d43 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/Kbuild
@@ -22,6 +22,7 @@ nvkm-y += nvkm/subdev/fb/mcp89.o
22nvkm-y += nvkm/subdev/fb/gf100.o 22nvkm-y += nvkm/subdev/fb/gf100.o
23nvkm-y += nvkm/subdev/fb/gf108.o 23nvkm-y += nvkm/subdev/fb/gf108.o
24nvkm-y += nvkm/subdev/fb/gk104.o 24nvkm-y += nvkm/subdev/fb/gk104.o
25nvkm-y += nvkm/subdev/fb/gk110.o
25nvkm-y += nvkm/subdev/fb/gk20a.o 26nvkm-y += nvkm/subdev/fb/gk20a.o
26nvkm-y += nvkm/subdev/fb/gm107.o 27nvkm-y += nvkm/subdev/fb/gm107.o
27nvkm-y += nvkm/subdev/fb/gm200.o 28nvkm-y += nvkm/subdev/fb/gm200.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gf100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gf100.c
index 47d28c279707..cdc4e0a2cc6b 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gf100.c
@@ -26,6 +26,7 @@
26 26
27#include <core/memory.h> 27#include <core/memory.h>
28#include <core/option.h> 28#include <core/option.h>
29#include <subdev/therm.h>
29 30
30void 31void
31gf100_fb_intr(struct nvkm_fb *base) 32gf100_fb_intr(struct nvkm_fb *base)
@@ -92,6 +93,11 @@ gf100_fb_init(struct nvkm_fb *base)
92 93
93 if (fb->r100c10_page) 94 if (fb->r100c10_page)
94 nvkm_wr32(device, 0x100c10, fb->r100c10 >> 8); 95 nvkm_wr32(device, 0x100c10, fb->r100c10 >> 8);
96
97 if (base->func->clkgate_pack) {
98 nvkm_therm_clkgate_init(device->therm,
99 base->func->clkgate_pack);
100 }
95} 101}
96 102
97void * 103void *
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gk104.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gk104.c
index 0a6e8eaad42c..48fd98e08baa 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gk104.c
@@ -20,10 +20,56 @@
20 * OTHER DEALINGS IN THE SOFTWARE. 20 * OTHER DEALINGS IN THE SOFTWARE.
21 * 21 *
22 * Authors: Ben Skeggs 22 * Authors: Ben Skeggs
23 * Lyude Paul
23 */ 24 */
25#include "gk104.h"
24#include "gf100.h" 26#include "gf100.h"
25#include "ram.h" 27#include "ram.h"
26 28
29/*
30 *******************************************************************************
31 * PGRAPH registers for clockgating
32 *******************************************************************************
33 */
34const struct nvkm_therm_clkgate_init
35gk104_fb_clkgate_blcg_init_unk_0[] = {
36 { 0x100d10, 1, 0x0000c244 },
37 { 0x100d30, 1, 0x0000c242 },
38 { 0x100d3c, 1, 0x00000242 },
39 { 0x100d48, 1, 0x00000242 },
40 { 0x100d1c, 1, 0x00000042 },
41 {}
42};
43
44const struct nvkm_therm_clkgate_init
45gk104_fb_clkgate_blcg_init_vm_0[] = {
46 { 0x100c98, 1, 0x00000242 },
47 {}
48};
49
50const struct nvkm_therm_clkgate_init
51gk104_fb_clkgate_blcg_init_main_0[] = {
52 { 0x10f000, 1, 0x00000042 },
53 { 0x17e030, 1, 0x00000044 },
54 { 0x17e040, 1, 0x00000044 },
55 {}
56};
57
58const struct nvkm_therm_clkgate_init
59gk104_fb_clkgate_blcg_init_bcast_0[] = {
60 { 0x17ea60, 4, 0x00000044 },
61 {}
62};
63
64static const struct nvkm_therm_clkgate_pack
65gk104_fb_clkgate_pack[] = {
66 { gk104_fb_clkgate_blcg_init_unk_0 },
67 { gk104_fb_clkgate_blcg_init_vm_0 },
68 { gk104_fb_clkgate_blcg_init_main_0 },
69 { gk104_fb_clkgate_blcg_init_bcast_0 },
70 {}
71};
72
27static const struct nvkm_fb_func 73static const struct nvkm_fb_func
28gk104_fb = { 74gk104_fb = {
29 .dtor = gf100_fb_dtor, 75 .dtor = gf100_fb_dtor,
@@ -33,6 +79,7 @@ gk104_fb = {
33 .intr = gf100_fb_intr, 79 .intr = gf100_fb_intr,
34 .ram_new = gk104_ram_new, 80 .ram_new = gk104_ram_new,
35 .default_bigpage = 17, 81 .default_bigpage = 17,
82 .clkgate_pack = gk104_fb_clkgate_pack,
36}; 83};
37 84
38int 85int
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gk104.h b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gk104.h
new file mode 100644
index 000000000000..b3c78e4ff706
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gk104.h
@@ -0,0 +1,35 @@
1/*
2 * Copyright 2018 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Lyude Paul
23 */
24
25#ifndef __GK104_FB_H__
26#define __GK104_FB_H__
27
28#include <subdev/therm.h>
29
30extern const struct nvkm_therm_clkgate_init gk104_fb_clkgate_blcg_init_unk_0[];
31extern const struct nvkm_therm_clkgate_init gk104_fb_clkgate_blcg_init_vm_0[];
32extern const struct nvkm_therm_clkgate_init gk104_fb_clkgate_blcg_init_main_0[];
33extern const struct nvkm_therm_clkgate_init gk104_fb_clkgate_blcg_init_bcast_0[];
34
35#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gk110.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gk110.c
new file mode 100644
index 000000000000..0695e5dd360e
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gk110.c
@@ -0,0 +1,71 @@
1/*
2 * Copyright 2017 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Lyude Paul
23 */
24#include "gf100.h"
25#include "gk104.h"
26#include "ram.h"
27#include <subdev/therm.h>
28#include <subdev/fb.h>
29
30/*
31 *******************************************************************************
32 * PGRAPH registers for clockgating
33 *******************************************************************************
34 */
35
36static const struct nvkm_therm_clkgate_init
37gk110_fb_clkgate_blcg_init_unk_0[] = {
38 { 0x100d10, 1, 0x0000c242 },
39 { 0x100d30, 1, 0x0000c242 },
40 { 0x100d3c, 1, 0x00000242 },
41 { 0x100d48, 1, 0x0000c242 },
42 { 0x100d1c, 1, 0x00000042 },
43 {}
44};
45
46static const struct nvkm_therm_clkgate_pack
47gk110_fb_clkgate_pack[] = {
48 { gk110_fb_clkgate_blcg_init_unk_0 },
49 { gk104_fb_clkgate_blcg_init_vm_0 },
50 { gk104_fb_clkgate_blcg_init_main_0 },
51 { gk104_fb_clkgate_blcg_init_bcast_0 },
52 {}
53};
54
55static const struct nvkm_fb_func
56gk110_fb = {
57 .dtor = gf100_fb_dtor,
58 .oneinit = gf100_fb_oneinit,
59 .init = gf100_fb_init,
60 .init_page = gf100_fb_init_page,
61 .intr = gf100_fb_intr,
62 .ram_new = gk104_ram_new,
63 .default_bigpage = 17,
64 .clkgate_pack = gk110_fb_clkgate_pack,
65};
66
67int
68gk110_fb_new(struct nvkm_device *device, int index, struct nvkm_fb **pfb)
69{
70 return gf100_fb_new_(&gk110_fb, device, index, pfb);
71}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/priv.h
index 9351188d5d76..414a423e0e55 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/priv.h
@@ -3,6 +3,7 @@
3#define __NVKM_FB_PRIV_H__ 3#define __NVKM_FB_PRIV_H__
4#define nvkm_fb(p) container_of((p), struct nvkm_fb, subdev) 4#define nvkm_fb(p) container_of((p), struct nvkm_fb, subdev)
5#include <subdev/fb.h> 5#include <subdev/fb.h>
6#include <subdev/therm.h>
6struct nvkm_bios; 7struct nvkm_bios;
7 8
8struct nvkm_fb_func { 9struct nvkm_fb_func {
@@ -27,6 +28,7 @@ struct nvkm_fb_func {
27 int (*ram_new)(struct nvkm_fb *, struct nvkm_ram **); 28 int (*ram_new)(struct nvkm_fb *, struct nvkm_ram **);
28 29
29 u8 default_bigpage; 30 u8 default_bigpage;
31 const struct nvkm_therm_clkgate_pack *clkgate_pack;
30}; 32};
31 33
32void nvkm_fb_ctor(const struct nvkm_fb_func *, struct nvkm_device *device, 34void nvkm_fb_ctor(const struct nvkm_fb_func *, struct nvkm_device *device,
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/uvmm.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/uvmm.c
index fa81d0c1ba41..37b201b95f15 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/uvmm.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/uvmm.c
@@ -106,7 +106,8 @@ nvkm_uvmm_mthd_map(struct nvkm_uvmm *uvmm, void *argv, u32 argc)
106 } else 106 } else
107 return ret; 107 return ret;
108 108
109 if (IS_ERR((memory = nvkm_umem_search(client, handle)))) { 109 memory = nvkm_umem_search(client, handle);
110 if (IS_ERR(memory)) {
110 VMM_DEBUG(vmm, "memory %016llx %ld\n", handle, PTR_ERR(memory)); 111 VMM_DEBUG(vmm, "memory %016llx %ld\n", handle, PTR_ERR(memory));
111 return PTR_ERR(memory); 112 return PTR_ERR(memory);
112 } 113 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c
index e35d3e17cd7c..93946dcee319 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c
@@ -642,7 +642,7 @@ nvkm_vmm_ptes_sparse(struct nvkm_vmm *vmm, u64 addr, u64 size, bool ref)
642 else 642 else
643 block = (size >> page[i].shift) << page[i].shift; 643 block = (size >> page[i].shift) << page[i].shift;
644 } else { 644 } else {
645 block = (size >> page[i].shift) << page[i].shift;; 645 block = (size >> page[i].shift) << page[i].shift;
646 } 646 }
647 647
648 /* Perform operation. */ 648 /* Perform operation. */
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gf100.fuc3.h b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gf100.fuc3.h
index 53d01fb00a8b..1dbe593e5960 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gf100.fuc3.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gf100.fuc3.h
@@ -47,8 +47,8 @@ static uint32_t gf100_pmu_data[] = {
47 0x00000000, 47 0x00000000,
48 0x00000000, 48 0x00000000,
49 0x584d454d, 49 0x584d454d,
50 0x00000756, 50 0x00000754,
51 0x00000748, 51 0x00000746,
52 0x00000000, 52 0x00000000,
53 0x00000000, 53 0x00000000,
54 0x00000000, 54 0x00000000,
@@ -69,8 +69,8 @@ static uint32_t gf100_pmu_data[] = {
69 0x00000000, 69 0x00000000,
70 0x00000000, 70 0x00000000,
71 0x46524550, 71 0x46524550,
72 0x0000075a,
73 0x00000758, 72 0x00000758,
73 0x00000756,
74 0x00000000, 74 0x00000000,
75 0x00000000, 75 0x00000000,
76 0x00000000, 76 0x00000000,
@@ -91,8 +91,8 @@ static uint32_t gf100_pmu_data[] = {
91 0x00000000, 91 0x00000000,
92 0x00000000, 92 0x00000000,
93 0x5f433249, 93 0x5f433249,
94 0x00000b8a, 94 0x00000b88,
95 0x00000a2d, 95 0x00000a2b,
96 0x00000000, 96 0x00000000,
97 0x00000000, 97 0x00000000,
98 0x00000000, 98 0x00000000,
@@ -113,8 +113,8 @@ static uint32_t gf100_pmu_data[] = {
113 0x00000000, 113 0x00000000,
114 0x00000000, 114 0x00000000,
115 0x54534554, 115 0x54534554,
116 0x00000bb3, 116 0x00000bb1,
117 0x00000b8c, 117 0x00000b8a,
118 0x00000000, 118 0x00000000,
119 0x00000000, 119 0x00000000,
120 0x00000000, 120 0x00000000,
@@ -135,8 +135,8 @@ static uint32_t gf100_pmu_data[] = {
135 0x00000000, 135 0x00000000,
136 0x00000000, 136 0x00000000,
137 0x454c4449, 137 0x454c4449,
138 0x00000bbf,
139 0x00000bbd, 138 0x00000bbd,
139 0x00000bbb,
140 0x00000000, 140 0x00000000,
141 0x00000000, 141 0x00000000,
142 0x00000000, 142 0x00000000,
@@ -237,19 +237,19 @@ static uint32_t gf100_pmu_data[] = {
237 0x000005d3, 237 0x000005d3,
238 0x00000003, 238 0x00000003,
239 0x00000002, 239 0x00000002,
240 0x0000069d, 240 0x0000069b,
241 0x00040004, 241 0x00040004,
242 0x00000000, 242 0x00000000,
243 0x000006b9, 243 0x000006b7,
244 0x00010005, 244 0x00010005,
245 0x00000000, 245 0x00000000,
246 0x000006d6, 246 0x000006d4,
247 0x00010006, 247 0x00010006,
248 0x00000000, 248 0x00000000,
249 0x0000065b, 249 0x0000065b,
250 0x00000007, 250 0x00000007,
251 0x00000000, 251 0x00000000,
252 0x000006e1, 252 0x000006df,
253/* 0x03c4: memx_func_tail */ 253/* 0x03c4: memx_func_tail */
254/* 0x03c4: memx_ts_start */ 254/* 0x03c4: memx_ts_start */
255 0x00000000, 255 0x00000000,
@@ -1373,432 +1373,432 @@ static uint32_t gf100_pmu_code[] = {
1373/* 0x065b: memx_func_wait_vblank */ 1373/* 0x065b: memx_func_wait_vblank */
1374 0x9800f840, 1374 0x9800f840,
1375 0x66b00016, 1375 0x66b00016,
1376 0x130bf400, 1376 0x120bf400,
1377 0xf40166b0, 1377 0xf40166b0,
1378 0x0ef4060b, 1378 0x0ef4060b,
1379/* 0x066d: memx_func_wait_vblank_head1 */ 1379/* 0x066d: memx_func_wait_vblank_head1 */
1380 0x2077f12e, 1380 0x2077f02c,
1381 0x070ef400, 1381/* 0x0673: memx_func_wait_vblank_head0 */
1382/* 0x0674: memx_func_wait_vblank_head0 */ 1382 0xf0060ef4,
1383 0x000877f1, 1383/* 0x0676: memx_func_wait_vblank_0 */
1384/* 0x0678: memx_func_wait_vblank_0 */ 1384 0x67f10877,
1385 0x07c467f1, 1385 0x64b607c4,
1386 0xcf0664b6, 1386 0x0066cf06,
1387 0x67fd0066, 1387 0xf40467fd,
1388 0xf31bf404, 1388/* 0x0686: memx_func_wait_vblank_1 */
1389/* 0x0688: memx_func_wait_vblank_1 */ 1389 0x67f1f31b,
1390 0x07c467f1, 1390 0x64b607c4,
1391 0xcf0664b6, 1391 0x0066cf06,
1392 0x67fd0066, 1392 0xf40467fd,
1393 0xf30bf404, 1393/* 0x0696: memx_func_wait_vblank_fini */
1394/* 0x0698: memx_func_wait_vblank_fini */ 1394 0x10b6f30b,
1395 0xf80410b6, 1395/* 0x069b: memx_func_wr32 */
1396/* 0x069d: memx_func_wr32 */ 1396 0x9800f804,
1397 0x00169800, 1397 0x15980016,
1398 0xb6011598, 1398 0x0810b601,
1399 0x60f90810, 1399 0x50f960f9,
1400 0xd0fc50f9, 1400 0xe0fcd0fc,
1401 0x21f4e0fc, 1401 0xb64021f4,
1402 0x0242b640, 1402 0x1bf40242,
1403 0xf8e91bf4, 1403/* 0x06b7: memx_func_wait */
1404/* 0x06b9: memx_func_wait */ 1404 0xf000f8e9,
1405 0x2c87f000, 1405 0x84b62c87,
1406 0xcf0684b6, 1406 0x0088cf06,
1407 0x1e980088, 1407 0x98001e98,
1408 0x011d9800, 1408 0x1c98011d,
1409 0x98021c98, 1409 0x031b9802,
1410 0x10b6031b, 1410 0xf41010b6,
1411 0xa321f410, 1411 0x00f8a321,
1412/* 0x06d6: memx_func_delay */ 1412/* 0x06d4: memx_func_delay */
1413 0x1e9800f8, 1413 0xb6001e98,
1414 0x0410b600, 1414 0x21f40410,
1415 0xf87e21f4, 1415/* 0x06df: memx_func_train */
1416/* 0x06e1: memx_func_train */ 1416 0xf800f87e,
1417/* 0x06e3: memx_exec */ 1417/* 0x06e1: memx_exec */
1418 0xf900f800, 1418 0xf9e0f900,
1419 0xb9d0f9e0, 1419 0x02c1b9d0,
1420 0xb2b902c1, 1420/* 0x06eb: memx_exec_next */
1421/* 0x06ed: memx_exec_next */ 1421 0x9802b2b9,
1422 0x00139802, 1422 0x10b60013,
1423 0xe70410b6, 1423 0xf034e704,
1424 0xe701f034, 1424 0xe033e701,
1425 0xb601e033, 1425 0x0132b601,
1426 0x30f00132, 1426 0x980c30f0,
1427 0xde35980c, 1427 0x55f9de35,
1428 0x12b855f9, 1428 0xf40612b8,
1429 0xe41ef406, 1429 0x0b98e41e,
1430 0x98f10b98, 1430 0xf20c98f1,
1431 0xcbbbf20c, 1431 0xf102cbbb,
1432 0xc4b7f102, 1432 0xb607c4b7,
1433 0x06b4b607, 1433 0xbbcf06b4,
1434 0xfc00bbcf, 1434 0xfcd0fc00,
1435 0xf5e0fcd0, 1435 0x3621f5e0,
1436 0xf8033621, 1436/* 0x0727: memx_info */
1437/* 0x0729: memx_info */ 1437 0x7000f803,
1438 0x01c67000, 1438 0x0bf401c6,
1439/* 0x072f: memx_info_data */ 1439/* 0x072d: memx_info_data */
1440 0xf10e0bf4, 1440 0xccc7f10e,
1441 0xf103ccc7, 1441 0x00b7f103,
1442 0xf40800b7, 1442 0x0b0ef408,
1443/* 0x073a: memx_info_train */ 1443/* 0x0738: memx_info_train */
1444 0xc7f10b0e, 1444 0x0bccc7f1,
1445 0xb7f10bcc, 1445 0x0100b7f1,
1446/* 0x0742: memx_info_send */ 1446/* 0x0740: memx_info_send */
1447 0x21f50100, 1447 0x033621f5,
1448 0x00f80336, 1448/* 0x0746: memx_recv */
1449/* 0x0748: memx_recv */ 1449 0xd6b000f8,
1450 0xf401d6b0, 1450 0x980bf401,
1451 0xd6b0980b, 1451 0xf400d6b0,
1452 0xd80bf400, 1452 0x00f8d80b,
1453/* 0x0756: memx_init */ 1453/* 0x0754: memx_init */
1454 0x00f800f8, 1454/* 0x0756: perf_recv */
1455/* 0x0758: perf_recv */
1456/* 0x075a: perf_init */
1457 0x00f800f8, 1455 0x00f800f8,
1458/* 0x075c: i2c_drive_scl */ 1456/* 0x0758: perf_init */
1459 0xf40036b0, 1457/* 0x075a: i2c_drive_scl */
1460 0x07f1110b,
1461 0x04b607e0,
1462 0x0001d006,
1463 0x00f804bd,
1464/* 0x0770: i2c_drive_scl_lo */
1465 0x07e407f1,
1466 0xd00604b6,
1467 0x04bd0001,
1468/* 0x077e: i2c_drive_sda */
1469 0x36b000f8, 1458 0x36b000f8,
1470 0x110bf400, 1459 0x110bf400,
1471 0x07e007f1, 1460 0x07e007f1,
1472 0xd00604b6, 1461 0xd00604b6,
1473 0x04bd0002, 1462 0x04bd0001,
1474/* 0x0792: i2c_drive_sda_lo */ 1463/* 0x076e: i2c_drive_scl_lo */
1475 0x07f100f8, 1464 0x07f100f8,
1476 0x04b607e4, 1465 0x04b607e4,
1466 0x0001d006,
1467 0x00f804bd,
1468/* 0x077c: i2c_drive_sda */
1469 0xf40036b0,
1470 0x07f1110b,
1471 0x04b607e0,
1477 0x0002d006, 1472 0x0002d006,
1478 0x00f804bd, 1473 0x00f804bd,
1479/* 0x07a0: i2c_sense_scl */ 1474/* 0x0790: i2c_drive_sda_lo */
1480 0xf10132f4, 1475 0x07e407f1,
1481 0xb607c437, 1476 0xd00604b6,
1482 0x33cf0634, 1477 0x04bd0002,
1483 0x0431fd00, 1478/* 0x079e: i2c_sense_scl */
1484 0xf4060bf4, 1479 0x32f400f8,
1485/* 0x07b6: i2c_sense_scl_done */ 1480 0xc437f101,
1486 0x00f80131, 1481 0x0634b607,
1487/* 0x07b8: i2c_sense_sda */ 1482 0xfd0033cf,
1488 0xf10132f4, 1483 0x0bf40431,
1489 0xb607c437, 1484 0x0131f406,
1490 0x33cf0634, 1485/* 0x07b4: i2c_sense_scl_done */
1491 0x0432fd00, 1486/* 0x07b6: i2c_sense_sda */
1492 0xf4060bf4, 1487 0x32f400f8,
1493/* 0x07ce: i2c_sense_sda_done */ 1488 0xc437f101,
1494 0x00f80131, 1489 0x0634b607,
1495/* 0x07d0: i2c_raise_scl */ 1490 0xfd0033cf,
1496 0x47f140f9, 1491 0x0bf40432,
1497 0x37f00898, 1492 0x0131f406,
1498 0x5c21f501, 1493/* 0x07cc: i2c_sense_sda_done */
1499/* 0x07dd: i2c_raise_scl_wait */ 1494/* 0x07ce: i2c_raise_scl */
1500 0xe8e7f107, 1495 0x40f900f8,
1501 0x7e21f403, 1496 0x089847f1,
1502 0x07a021f5, 1497 0xf50137f0,
1503 0xb60901f4, 1498/* 0x07db: i2c_raise_scl_wait */
1504 0x1bf40142, 1499 0xf1075a21,
1505/* 0x07f1: i2c_raise_scl_done */ 1500 0xf403e8e7,
1506 0xf840fcef, 1501 0x21f57e21,
1507/* 0x07f5: i2c_start */ 1502 0x01f4079e,
1508 0xa021f500, 1503 0x0142b609,
1509 0x0d11f407, 1504/* 0x07ef: i2c_raise_scl_done */
1510 0x07b821f5, 1505 0xfcef1bf4,
1511 0xf40611f4, 1506/* 0x07f3: i2c_start */
1512/* 0x0806: i2c_start_rep */ 1507 0xf500f840,
1513 0x37f0300e, 1508 0xf4079e21,
1514 0x5c21f500, 1509 0x21f50d11,
1515 0x0137f007, 1510 0x11f407b6,
1516 0x077e21f5, 1511 0x300ef406,
1517 0xb60076bb, 1512/* 0x0804: i2c_start_rep */
1518 0x50f90465, 1513 0xf50037f0,
1519 0xbb046594, 1514 0xf0075a21,
1520 0x50bd0256, 1515 0x21f50137,
1521 0xfc0475fd, 1516 0x76bb077c,
1522 0xd021f550, 1517 0x0465b600,
1523 0x0464b607, 1518 0x659450f9,
1524/* 0x0833: i2c_start_send */ 1519 0x0256bb04,
1525 0xf01f11f4, 1520 0x75fd50bd,
1521 0xf550fc04,
1522 0xb607ce21,
1523 0x11f40464,
1524/* 0x0831: i2c_start_send */
1525 0x0037f01f,
1526 0x077c21f5,
1527 0x1388e7f1,
1528 0xf07e21f4,
1526 0x21f50037, 1529 0x21f50037,
1527 0xe7f1077e, 1530 0xe7f1075a,
1528 0x21f41388, 1531 0x21f41388,
1529 0x0037f07e, 1532/* 0x084d: i2c_start_out */
1530 0x075c21f5, 1533/* 0x084f: i2c_stop */
1531 0x1388e7f1, 1534 0xf000f87e,
1532/* 0x084f: i2c_start_out */ 1535 0x21f50037,
1533 0xf87e21f4, 1536 0x37f0075a,
1534/* 0x0851: i2c_stop */ 1537 0x7c21f500,
1535 0x0037f000, 1538 0xe8e7f107,
1536 0x075c21f5, 1539 0x7e21f403,
1537 0xf50037f0,
1538 0xf1077e21,
1539 0xf403e8e7,
1540 0x37f07e21,
1541 0x5c21f501,
1542 0x88e7f107,
1543 0x7e21f413,
1544 0xf50137f0, 1540 0xf50137f0,
1545 0xf1077e21, 1541 0xf1075a21,
1546 0xf41388e7, 1542 0xf41388e7,
1547 0x00f87e21, 1543 0x37f07e21,
1548/* 0x0884: i2c_bitw */ 1544 0x7c21f501,
1549 0x077e21f5,
1550 0x03e8e7f1,
1551 0xbb7e21f4,
1552 0x65b60076,
1553 0x9450f904,
1554 0x56bb0465,
1555 0xfd50bd02,
1556 0x50fc0475,
1557 0x07d021f5,
1558 0xf40464b6,
1559 0xe7f11811,
1560 0x21f41388,
1561 0x0037f07e,
1562 0x075c21f5,
1563 0x1388e7f1,
1564/* 0x08c3: i2c_bitw_out */
1565 0xf87e21f4,
1566/* 0x08c5: i2c_bitr */
1567 0x0137f000,
1568 0x077e21f5,
1569 0x03e8e7f1,
1570 0xbb7e21f4,
1571 0x65b60076,
1572 0x9450f904,
1573 0x56bb0465,
1574 0xfd50bd02,
1575 0x50fc0475,
1576 0x07d021f5,
1577 0xf40464b6,
1578 0x21f51b11,
1579 0x37f007b8,
1580 0x5c21f500,
1581 0x88e7f107, 1545 0x88e7f107,
1582 0x7e21f413, 1546 0x7e21f413,
1583 0xf4013cf0, 1547/* 0x0882: i2c_bitw */
1584/* 0x090a: i2c_bitr_done */ 1548 0x21f500f8,
1585 0x00f80131, 1549 0xe7f1077c,
1586/* 0x090c: i2c_get_byte */ 1550 0x21f403e8,
1587 0xf00057f0, 1551 0x0076bb7e,
1588/* 0x0912: i2c_get_byte_next */
1589 0x54b60847,
1590 0x0076bb01,
1591 0xf90465b6, 1552 0xf90465b6,
1592 0x04659450, 1553 0x04659450,
1593 0xbd0256bb, 1554 0xbd0256bb,
1594 0x0475fd50, 1555 0x0475fd50,
1595 0x21f550fc, 1556 0x21f550fc,
1596 0x64b608c5, 1557 0x64b607ce,
1597 0x2b11f404, 1558 0x1811f404,
1598 0xb60553fd, 1559 0x1388e7f1,
1599 0x1bf40142, 1560 0xf07e21f4,
1600 0x0137f0d8, 1561 0x21f50037,
1601 0xb60076bb, 1562 0xe7f1075a,
1602 0x50f90465, 1563 0x21f41388,
1603 0xbb046594, 1564/* 0x08c1: i2c_bitw_out */
1604 0x50bd0256, 1565/* 0x08c3: i2c_bitr */
1605 0xfc0475fd, 1566 0xf000f87e,
1606 0x8421f550, 1567 0x21f50137,
1607 0x0464b608, 1568 0xe7f1077c,
1608/* 0x095c: i2c_get_byte_done */ 1569 0x21f403e8,
1609/* 0x095e: i2c_put_byte */ 1570 0x0076bb7e,
1610 0x47f000f8, 1571 0xf90465b6,
1611/* 0x0961: i2c_put_byte_next */ 1572 0x04659450,
1612 0x0142b608, 1573 0xbd0256bb,
1613 0xbb3854ff, 1574 0x0475fd50,
1575 0x21f550fc,
1576 0x64b607ce,
1577 0x1b11f404,
1578 0x07b621f5,
1579 0xf50037f0,
1580 0xf1075a21,
1581 0xf41388e7,
1582 0x3cf07e21,
1583 0x0131f401,
1584/* 0x0908: i2c_bitr_done */
1585/* 0x090a: i2c_get_byte */
1586 0x57f000f8,
1587 0x0847f000,
1588/* 0x0910: i2c_get_byte_next */
1589 0xbb0154b6,
1614 0x65b60076, 1590 0x65b60076,
1615 0x9450f904, 1591 0x9450f904,
1616 0x56bb0465, 1592 0x56bb0465,
1617 0xfd50bd02, 1593 0xfd50bd02,
1618 0x50fc0475, 1594 0x50fc0475,
1619 0x088421f5, 1595 0x08c321f5,
1620 0xf40464b6, 1596 0xf40464b6,
1621 0x46b03411, 1597 0x53fd2b11,
1622 0xd81bf400, 1598 0x0142b605,
1623 0xb60076bb, 1599 0xf0d81bf4,
1624 0x50f90465, 1600 0x76bb0137,
1625 0xbb046594, 1601 0x0465b600,
1626 0x50bd0256, 1602 0x659450f9,
1627 0xfc0475fd, 1603 0x0256bb04,
1628 0xc521f550, 1604 0x75fd50bd,
1629 0x0464b608, 1605 0xf550fc04,
1630 0xbb0f11f4, 1606 0xb6088221,
1631 0x36b00076, 1607/* 0x095a: i2c_get_byte_done */
1632 0x061bf401, 1608 0x00f80464,
1633/* 0x09b7: i2c_put_byte_done */ 1609/* 0x095c: i2c_put_byte */
1634 0xf80132f4, 1610/* 0x095f: i2c_put_byte_next */
1635/* 0x09b9: i2c_addr */ 1611 0xb60847f0,
1636 0x0076bb00, 1612 0x54ff0142,
1613 0x0076bb38,
1637 0xf90465b6, 1614 0xf90465b6,
1638 0x04659450, 1615 0x04659450,
1639 0xbd0256bb, 1616 0xbd0256bb,
1640 0x0475fd50, 1617 0x0475fd50,
1641 0x21f550fc, 1618 0x21f550fc,
1642 0x64b607f5, 1619 0x64b60882,
1643 0x2911f404, 1620 0x3411f404,
1644 0x012ec3e7, 1621 0xf40046b0,
1645 0xfd0134b6, 1622 0x76bbd81b,
1646 0x76bb0553,
1647 0x0465b600, 1623 0x0465b600,
1648 0x659450f9, 1624 0x659450f9,
1649 0x0256bb04, 1625 0x0256bb04,
1650 0x75fd50bd, 1626 0x75fd50bd,
1651 0xf550fc04, 1627 0xf550fc04,
1652 0xb6095e21, 1628 0xb608c321,
1653/* 0x09fe: i2c_addr_done */ 1629 0x11f40464,
1654 0x00f80464, 1630 0x0076bb0f,
1655/* 0x0a00: i2c_acquire_addr */ 1631 0xf40136b0,
1656 0xb6f8cec7, 1632 0x32f4061b,
1657 0xe0b702e4, 1633/* 0x09b5: i2c_put_byte_done */
1658 0xee980d1c, 1634/* 0x09b7: i2c_addr */
1659/* 0x0a0f: i2c_acquire */ 1635 0xbb00f801,
1660 0xf500f800,
1661 0xf40a0021,
1662 0xd9f00421,
1663 0x4021f403,
1664/* 0x0a1e: i2c_release */
1665 0x21f500f8,
1666 0x21f40a00,
1667 0x03daf004,
1668 0xf84021f4,
1669/* 0x0a2d: i2c_recv */
1670 0x0132f400,
1671 0xb6f8c1c7,
1672 0x16b00214,
1673 0x3a1ff528,
1674 0xf413a001,
1675 0x0032980c,
1676 0x0ccc13a0,
1677 0xf4003198,
1678 0xd0f90231,
1679 0xd0f9e0f9,
1680 0x000067f1,
1681 0x100063f1,
1682 0xbb016792,
1683 0x65b60076, 1636 0x65b60076,
1684 0x9450f904, 1637 0x9450f904,
1685 0x56bb0465, 1638 0x56bb0465,
1686 0xfd50bd02, 1639 0xfd50bd02,
1687 0x50fc0475, 1640 0x50fc0475,
1688 0x0a0f21f5, 1641 0x07f321f5,
1689 0xfc0464b6, 1642 0xf40464b6,
1690 0x00d6b0d0, 1643 0xc3e72911,
1691 0x00b31bf5, 1644 0x34b6012e,
1692 0xbb0057f0, 1645 0x0553fd01,
1693 0x65b60076, 1646 0xb60076bb,
1694 0x9450f904, 1647 0x50f90465,
1695 0x56bb0465, 1648 0xbb046594,
1696 0xfd50bd02, 1649 0x50bd0256,
1697 0x50fc0475, 1650 0xfc0475fd,
1698 0x09b921f5, 1651 0x5c21f550,
1699 0xf50464b6, 1652 0x0464b609,
1700 0xc700d011, 1653/* 0x09fc: i2c_addr_done */
1701 0x76bbe0c5, 1654/* 0x09fe: i2c_acquire_addr */
1702 0x0465b600, 1655 0xcec700f8,
1703 0x659450f9, 1656 0x02e4b6f8,
1704 0x0256bb04, 1657 0x0d1ce0b7,
1705 0x75fd50bd, 1658 0xf800ee98,
1706 0xf550fc04, 1659/* 0x0a0d: i2c_acquire */
1707 0xb6095e21, 1660 0xfe21f500,
1708 0x11f50464, 1661 0x0421f409,
1709 0x57f000ad, 1662 0xf403d9f0,
1663 0x00f84021,
1664/* 0x0a1c: i2c_release */
1665 0x09fe21f5,
1666 0xf00421f4,
1667 0x21f403da,
1668/* 0x0a2b: i2c_recv */
1669 0xf400f840,
1670 0xc1c70132,
1671 0x0214b6f8,
1672 0xf52816b0,
1673 0xa0013a1f,
1674 0x980cf413,
1675 0x13a00032,
1676 0x31980ccc,
1677 0x0231f400,
1678 0xe0f9d0f9,
1679 0x67f1d0f9,
1680 0x63f10000,
1681 0x67921000,
1710 0x0076bb01, 1682 0x0076bb01,
1711 0xf90465b6, 1683 0xf90465b6,
1712 0x04659450, 1684 0x04659450,
1713 0xbd0256bb, 1685 0xbd0256bb,
1714 0x0475fd50, 1686 0x0475fd50,
1715 0x21f550fc, 1687 0x21f550fc,
1716 0x64b609b9, 1688 0x64b60a0d,
1717 0x8a11f504, 1689 0xb0d0fc04,
1690 0x1bf500d6,
1691 0x57f000b3,
1718 0x0076bb00, 1692 0x0076bb00,
1719 0xf90465b6, 1693 0xf90465b6,
1720 0x04659450, 1694 0x04659450,
1721 0xbd0256bb, 1695 0xbd0256bb,
1722 0x0475fd50, 1696 0x0475fd50,
1723 0x21f550fc, 1697 0x21f550fc,
1724 0x64b6090c, 1698 0x64b609b7,
1725 0x6a11f404, 1699 0xd011f504,
1726 0xbbe05bcb, 1700 0xe0c5c700,
1701 0xb60076bb,
1702 0x50f90465,
1703 0xbb046594,
1704 0x50bd0256,
1705 0xfc0475fd,
1706 0x5c21f550,
1707 0x0464b609,
1708 0x00ad11f5,
1709 0xbb0157f0,
1727 0x65b60076, 1710 0x65b60076,
1728 0x9450f904, 1711 0x9450f904,
1729 0x56bb0465, 1712 0x56bb0465,
1730 0xfd50bd02, 1713 0xfd50bd02,
1731 0x50fc0475, 1714 0x50fc0475,
1732 0x085121f5, 1715 0x09b721f5,
1733 0xb90464b6, 1716 0xf50464b6,
1734 0x74bd025b, 1717 0xbb008a11,
1735/* 0x0b33: i2c_recv_not_rd08 */ 1718 0x65b60076,
1736 0xb0430ef4, 1719 0x9450f904,
1737 0x1bf401d6, 1720 0x56bb0465,
1738 0x0057f03d, 1721 0xfd50bd02,
1739 0x09b921f5, 1722 0x50fc0475,
1740 0xc73311f4, 1723 0x090a21f5,
1741 0x21f5e0c5, 1724 0xf40464b6,
1742 0x11f4095e, 1725 0x5bcb6a11,
1743 0x0057f029, 1726 0x0076bbe0,
1744 0x09b921f5, 1727 0xf90465b6,
1745 0xc71f11f4, 1728 0x04659450,
1746 0x21f5e0b5, 1729 0xbd0256bb,
1747 0x11f4095e, 1730 0x0475fd50,
1748 0x5121f515, 1731 0x21f550fc,
1749 0xc774bd08, 1732 0x64b6084f,
1750 0x1bf408c5, 1733 0x025bb904,
1751 0x0232f409, 1734 0x0ef474bd,
1752/* 0x0b73: i2c_recv_not_wr08 */ 1735/* 0x0b31: i2c_recv_not_rd08 */
1753/* 0x0b73: i2c_recv_done */ 1736 0x01d6b043,
1754 0xc7030ef4, 1737 0xf03d1bf4,
1755 0x21f5f8ce, 1738 0x21f50057,
1756 0xe0fc0a1e, 1739 0x11f409b7,
1757 0x12f4d0fc, 1740 0xe0c5c733,
1758 0x027cb90a, 1741 0x095c21f5,
1759 0x033621f5, 1742 0xf02911f4,
1760/* 0x0b88: i2c_recv_exit */ 1743 0x21f50057,
1761/* 0x0b8a: i2c_init */ 1744 0x11f409b7,
1762 0x00f800f8, 1745 0xe0b5c71f,
1763/* 0x0b8c: test_recv */ 1746 0x095c21f5,
1764 0x05d817f1, 1747 0xf51511f4,
1748 0xbd084f21,
1749 0x08c5c774,
1750 0xf4091bf4,
1751 0x0ef40232,
1752/* 0x0b71: i2c_recv_not_wr08 */
1753/* 0x0b71: i2c_recv_done */
1754 0xf8cec703,
1755 0x0a1c21f5,
1756 0xd0fce0fc,
1757 0xb90a12f4,
1758 0x21f5027c,
1759/* 0x0b86: i2c_recv_exit */
1760 0x00f80336,
1761/* 0x0b88: i2c_init */
1762/* 0x0b8a: test_recv */
1763 0x17f100f8,
1764 0x14b605d8,
1765 0x0011cf06,
1766 0xf10110b6,
1767 0xb605d807,
1768 0x01d00604,
1769 0xf104bd00,
1770 0xf1d900e7,
1771 0xf5134fe3,
1772 0xf8025621,
1773/* 0x0bb1: test_init */
1774 0x00e7f100,
1775 0x5621f508,
1776/* 0x0bbb: idle_recv */
1777 0xf800f802,
1778/* 0x0bbd: idle */
1779 0x0031f400,
1780 0x05d417f1,
1765 0xcf0614b6, 1781 0xcf0614b6,
1766 0x10b60011, 1782 0x10b60011,
1767 0xd807f101, 1783 0xd407f101,
1768 0x0604b605, 1784 0x0604b605,
1769 0xbd0001d0, 1785 0xbd0001d0,
1770 0x00e7f104, 1786/* 0x0bd9: idle_loop */
1771 0x4fe3f1d9, 1787 0x5817f004,
1772 0x5621f513, 1788/* 0x0bdf: idle_proc */
1773/* 0x0bb3: test_init */ 1789/* 0x0bdf: idle_proc_exec */
1774 0xf100f802, 1790 0xf90232f4,
1775 0xf50800e7, 1791 0x021eb910,
1776 0xf8025621, 1792 0x033f21f5,
1777/* 0x0bbd: idle_recv */ 1793 0x11f410fc,
1778/* 0x0bbf: idle */ 1794 0x0231f409,
1779 0xf400f800, 1795/* 0x0bf3: idle_proc_next */
1780 0x17f10031, 1796 0xb6ef0ef4,
1781 0x14b605d4, 1797 0x1fb85810,
1782 0x0011cf06, 1798 0xe61bf406,
1783 0xf10110b6, 1799 0xf4dd02f4,
1784 0xb605d407, 1800 0x0ef40028,
1785 0x01d00604, 1801 0x000000bb,
1786/* 0x0bdb: idle_loop */
1787 0xf004bd00,
1788 0x32f45817,
1789/* 0x0be1: idle_proc */
1790/* 0x0be1: idle_proc_exec */
1791 0xb910f902,
1792 0x21f5021e,
1793 0x10fc033f,
1794 0xf40911f4,
1795 0x0ef40231,
1796/* 0x0bf5: idle_proc_next */
1797 0x5810b6ef,
1798 0xf4061fb8,
1799 0x02f4e61b,
1800 0x0028f4dd,
1801 0x00bb0ef4,
1802 0x00000000, 1802 0x00000000,
1803 0x00000000, 1803 0x00000000,
1804 0x00000000, 1804 0x00000000,
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gk208.fuc5.h b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gk208.fuc5.h
index c4edbc79e41a..e0222cb832fb 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gk208.fuc5.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gk208.fuc5.h
@@ -47,8 +47,8 @@ static uint32_t gk208_pmu_data[] = {
47 0x00000000, 47 0x00000000,
48 0x00000000, 48 0x00000000,
49 0x584d454d, 49 0x584d454d,
50 0x000005f3, 50 0x000005ee,
51 0x000005e5, 51 0x000005e0,
52 0x00000000, 52 0x00000000,
53 0x00000000, 53 0x00000000,
54 0x00000000, 54 0x00000000,
@@ -69,8 +69,8 @@ static uint32_t gk208_pmu_data[] = {
69 0x00000000, 69 0x00000000,
70 0x00000000, 70 0x00000000,
71 0x46524550, 71 0x46524550,
72 0x000005f7, 72 0x000005f2,
73 0x000005f5, 73 0x000005f0,
74 0x00000000, 74 0x00000000,
75 0x00000000, 75 0x00000000,
76 0x00000000, 76 0x00000000,
@@ -91,8 +91,8 @@ static uint32_t gk208_pmu_data[] = {
91 0x00000000, 91 0x00000000,
92 0x00000000, 92 0x00000000,
93 0x5f433249, 93 0x5f433249,
94 0x000009f8, 94 0x000009f3,
95 0x000008a2, 95 0x0000089d,
96 0x00000000, 96 0x00000000,
97 0x00000000, 97 0x00000000,
98 0x00000000, 98 0x00000000,
@@ -113,8 +113,8 @@ static uint32_t gk208_pmu_data[] = {
113 0x00000000, 113 0x00000000,
114 0x00000000, 114 0x00000000,
115 0x54534554, 115 0x54534554,
116 0x00000a16, 116 0x00000a11,
117 0x000009fa, 117 0x000009f5,
118 0x00000000, 118 0x00000000,
119 0x00000000, 119 0x00000000,
120 0x00000000, 120 0x00000000,
@@ -135,8 +135,8 @@ static uint32_t gk208_pmu_data[] = {
135 0x00000000, 135 0x00000000,
136 0x00000000, 136 0x00000000,
137 0x454c4449, 137 0x454c4449,
138 0x00000a21, 138 0x00000a1c,
139 0x00000a1f, 139 0x00000a1a,
140 0x00000000, 140 0x00000000,
141 0x00000000, 141 0x00000000,
142 0x00000000, 142 0x00000000,
@@ -234,22 +234,22 @@ static uint32_t gk208_pmu_data[] = {
234/* 0x037c: memx_func_next */ 234/* 0x037c: memx_func_next */
235 0x00000002, 235 0x00000002,
236 0x00000000, 236 0x00000000,
237 0x000004cf, 237 0x000004cc,
238 0x00000003, 238 0x00000003,
239 0x00000002, 239 0x00000002,
240 0x00000546, 240 0x00000541,
241 0x00040004, 241 0x00040004,
242 0x00000000, 242 0x00000000,
243 0x00000563, 243 0x0000055e,
244 0x00010005, 244 0x00010005,
245 0x00000000, 245 0x00000000,
246 0x0000057d, 246 0x00000578,
247 0x00010006, 247 0x00010006,
248 0x00000000, 248 0x00000000,
249 0x00000541, 249 0x0000053c,
250 0x00000007, 250 0x00000007,
251 0x00000000, 251 0x00000000,
252 0x00000589, 252 0x00000584,
253/* 0x03c4: memx_func_tail */ 253/* 0x03c4: memx_func_tail */
254/* 0x03c4: memx_ts_start */ 254/* 0x03c4: memx_ts_start */
255 0x00000000, 255 0x00000000,
@@ -1239,454 +1239,454 @@ static uint32_t gk208_pmu_code[] = {
1239 0x0001f604, 1239 0x0001f604,
1240 0x00f804bd, 1240 0x00f804bd,
1241/* 0x045c: memx_func_enter */ 1241/* 0x045c: memx_func_enter */
1242 0x162067f1, 1242 0x47162046,
1243 0xf55d77f1, 1243 0x6eb2f55d,
1244 0x047e6eb2, 1244 0x0000047e,
1245 0xd8b20000, 1245 0x87fdd8b2,
1246 0xf90487fd, 1246 0xf960f904,
1247 0xfc80f960, 1247 0xfcd0fc80,
1248 0x7ee0fcd0, 1248 0x002d7ee0,
1249 0x0700002d, 1249 0xb2fe0700,
1250 0x7e6eb2fe, 1250 0x00047e6e,
1251 0xfdd8b200,
1252 0x60f90487,
1253 0xd0fc80f9,
1254 0x2d7ee0fc,
1255 0xf0460000,
1256 0x7e6eb226,
1251 0xb2000004, 1257 0xb2000004,
1252 0x0487fdd8, 1258 0x0487fdd8,
1253 0x80f960f9, 1259 0x80f960f9,
1254 0xe0fcd0fc, 1260 0xe0fcd0fc,
1255 0x00002d7e, 1261 0x00002d7e,
1256 0x26f067f1, 1262 0xe0400406,
1257 0x047e6eb2, 1263 0x0006f607,
1258 0xd8b20000, 1264/* 0x04b6: memx_func_enter_wait */
1259 0xf90487fd, 1265 0xc04604bd,
1260 0xfc80f960, 1266 0x0066cf07,
1261 0x7ee0fcd0, 1267 0xf40464f0,
1262 0x0600002d, 1268 0x2c06f70b,
1263 0x07e04004, 1269 0xb50066cf,
1264 0xbd0006f6, 1270 0x00f8f106,
1265/* 0x04b9: memx_func_enter_wait */ 1271/* 0x04cc: memx_func_leave */
1266 0x07c04604, 1272 0x66cf2c06,
1267 0xf00066cf, 1273 0xf206b500,
1268 0x0bf40464, 1274 0xe4400406,
1269 0xcf2c06f7, 1275 0x0006f607,
1270 0x06b50066, 1276/* 0x04de: memx_func_leave_wait */
1271/* 0x04cf: memx_func_leave */ 1277 0xc04604bd,
1272 0x0600f8f1, 1278 0x0066cf07,
1273 0x0066cf2c, 1279 0xf40464f0,
1274 0x06f206b5, 1280 0xf046f71b,
1275 0x07e44004,
1276 0xbd0006f6,
1277/* 0x04e1: memx_func_leave_wait */
1278 0x07c04604,
1279 0xf00066cf,
1280 0x1bf40464,
1281 0xf067f1f7,
1282 0xb2010726, 1281 0xb2010726,
1283 0x00047e6e, 1282 0x00047e6e,
1284 0xfdd8b200, 1283 0xfdd8b200,
1285 0x60f90587, 1284 0x60f90587,
1286 0xd0fc80f9, 1285 0xd0fc80f9,
1287 0x2d7ee0fc, 1286 0x2d7ee0fc,
1288 0x67f10000, 1287 0x20460000,
1289 0x6eb21620, 1288 0x7e6eb216,
1290 0x0000047e, 1289 0xb2000004,
1291 0x87fdd8b2, 1290 0x0587fdd8,
1292 0xf960f905, 1291 0x80f960f9,
1293 0xfcd0fc80, 1292 0xe0fcd0fc,
1294 0x002d7ee0, 1293 0x00002d7e,
1295 0x0aa24700, 1294 0xb20aa247,
1296 0x047e6eb2, 1295 0x00047e6e,
1297 0xd8b20000, 1296 0xfdd8b200,
1298 0xf90587fd, 1297 0x60f90587,
1299 0xfc80f960, 1298 0xd0fc80f9,
1300 0x7ee0fcd0, 1299 0x2d7ee0fc,
1301 0xf800002d, 1300 0x00f80000,
1302/* 0x0541: memx_func_wait_vblank */ 1301/* 0x053c: memx_func_wait_vblank */
1302 0xf80410b6,
1303/* 0x0541: memx_func_wr32 */
1304 0x00169800,
1305 0xb6011598,
1306 0x60f90810,
1307 0xd0fc50f9,
1308 0x2d7ee0fc,
1309 0x42b60000,
1310 0xe81bf402,
1311/* 0x055e: memx_func_wait */
1312 0x2c0800f8,
1313 0x980088cf,
1314 0x1d98001e,
1315 0x021c9801,
1316 0xb6031b98,
1317 0x747e1010,
1318 0x00f80000,
1319/* 0x0578: memx_func_delay */
1320 0xb6001e98,
1321 0x587e0410,
1322 0x00f80000,
1323/* 0x0584: memx_func_train */
1324/* 0x0586: memx_exec */
1325 0xe0f900f8,
1326 0xc1b2d0f9,
1327/* 0x058e: memx_exec_next */
1328 0x1398b2b2,
1303 0x0410b600, 1329 0x0410b600,
1304/* 0x0546: memx_func_wr32 */ 1330 0x01f034e7,
1305 0x169800f8, 1331 0x01e033e7,
1306 0x01159800, 1332 0xf00132b6,
1307 0xf90810b6, 1333 0x35980c30,
1308 0xfc50f960, 1334 0xa655f9de,
1335 0xe51ef412,
1336 0x98f10b98,
1337 0xcbbbf20c,
1338 0x07c44b02,
1339 0xfc00bbcf,
1309 0x7ee0fcd0, 1340 0x7ee0fcd0,
1310 0xb600002d,
1311 0x1bf40242,
1312/* 0x0563: memx_func_wait */
1313 0x0800f8e8,
1314 0x0088cf2c,
1315 0x98001e98,
1316 0x1c98011d,
1317 0x031b9802,
1318 0x7e1010b6,
1319 0xf8000074,
1320/* 0x057d: memx_func_delay */
1321 0x001e9800,
1322 0x7e0410b6,
1323 0xf8000058,
1324/* 0x0589: memx_func_train */
1325/* 0x058b: memx_exec */
1326 0xf900f800,
1327 0xb2d0f9e0,
1328/* 0x0593: memx_exec_next */
1329 0x98b2b2c1,
1330 0x10b60013,
1331 0xf034e704,
1332 0xe033e701,
1333 0x0132b601,
1334 0x980c30f0,
1335 0x55f9de35,
1336 0x1ef412a6,
1337 0xf10b98e5,
1338 0xbbf20c98,
1339 0xc44b02cb,
1340 0x00bbcf07,
1341 0xe0fcd0fc,
1342 0x00029f7e,
1343/* 0x05ca: memx_info */
1344 0xc67000f8,
1345 0x0c0bf401,
1346/* 0x05d0: memx_info_data */
1347 0x4b03cc4c,
1348 0x0ef40800,
1349/* 0x05d9: memx_info_train */
1350 0x0bcc4c09,
1351/* 0x05df: memx_info_send */
1352 0x7e01004b,
1353 0xf800029f, 1341 0xf800029f,
1354/* 0x05e5: memx_recv */ 1342/* 0x05c5: memx_info */
1355 0x01d6b000, 1343 0x01c67000,
1356 0xb0a30bf4, 1344/* 0x05cb: memx_info_data */
1357 0x0bf400d6, 1345 0x4c0c0bf4,
1358/* 0x05f3: memx_init */ 1346 0x004b03cc,
1359 0xf800f8dc, 1347 0x090ef408,
1360/* 0x05f5: perf_recv */ 1348/* 0x05d4: memx_info_train */
1361/* 0x05f7: perf_init */ 1349 0x4b0bcc4c,
1362 0xf800f800, 1350/* 0x05da: memx_info_send */
1363/* 0x05f9: i2c_drive_scl */ 1351 0x9f7e0100,
1364 0x0036b000, 1352 0x00f80002,
1365 0x400d0bf4, 1353/* 0x05e0: memx_recv */
1366 0x01f607e0, 1354 0xf401d6b0,
1367 0xf804bd00, 1355 0xd6b0a30b,
1368/* 0x0609: i2c_drive_scl_lo */ 1356 0xdc0bf400,
1369 0x07e44000, 1357/* 0x05ee: memx_init */
1370 0xbd0001f6, 1358 0x00f800f8,
1371/* 0x0613: i2c_drive_sda */ 1359/* 0x05f0: perf_recv */
1372 0xb000f804, 1360/* 0x05f2: perf_init */
1373 0x0bf40036, 1361 0x00f800f8,
1374 0x07e0400d, 1362/* 0x05f4: i2c_drive_scl */
1375 0xbd0002f6, 1363 0xf40036b0,
1376/* 0x0623: i2c_drive_sda_lo */ 1364 0xe0400d0b,
1377 0x4000f804, 1365 0x0001f607,
1378 0x02f607e4, 1366 0x00f804bd,
1379 0xf804bd00, 1367/* 0x0604: i2c_drive_scl_lo */
1380/* 0x062d: i2c_sense_scl */ 1368 0xf607e440,
1381 0x0132f400, 1369 0x04bd0001,
1382 0xcf07c443, 1370/* 0x060e: i2c_drive_sda */
1383 0x31fd0033, 1371 0x36b000f8,
1384 0x060bf404, 1372 0x0d0bf400,
1385/* 0x063f: i2c_sense_scl_done */ 1373 0xf607e040,
1386 0xf80131f4, 1374 0x04bd0002,
1387/* 0x0641: i2c_sense_sda */ 1375/* 0x061e: i2c_drive_sda_lo */
1388 0x0132f400, 1376 0xe44000f8,
1389 0xcf07c443, 1377 0x0002f607,
1390 0x32fd0033, 1378 0x00f804bd,
1391 0x060bf404, 1379/* 0x0628: i2c_sense_scl */
1392/* 0x0653: i2c_sense_sda_done */ 1380 0x430132f4,
1393 0xf80131f4, 1381 0x33cf07c4,
1394/* 0x0655: i2c_raise_scl */ 1382 0x0431fd00,
1395 0x4440f900, 1383 0xf4060bf4,
1396 0x01030898, 1384/* 0x063a: i2c_sense_scl_done */
1397 0x0005f97e, 1385 0x00f80131,
1398/* 0x0660: i2c_raise_scl_wait */ 1386/* 0x063c: i2c_sense_sda */
1399 0x7e03e84e, 1387 0x430132f4,
1400 0x7e000058, 1388 0x33cf07c4,
1401 0xf400062d, 1389 0x0432fd00,
1402 0x42b60901, 1390 0xf4060bf4,
1403 0xef1bf401, 1391/* 0x064e: i2c_sense_sda_done */
1404/* 0x0674: i2c_raise_scl_done */ 1392 0x00f80131,
1405 0x00f840fc, 1393/* 0x0650: i2c_raise_scl */
1406/* 0x0678: i2c_start */ 1394 0x984440f9,
1407 0x00062d7e, 1395 0x7e010308,
1408 0x7e0d11f4, 1396/* 0x065b: i2c_raise_scl_wait */
1409 0xf4000641, 1397 0x4e0005f4,
1410 0x0ef40611, 1398 0x587e03e8,
1411/* 0x0689: i2c_start_rep */ 1399 0x287e0000,
1412 0x7e00032e, 1400 0x01f40006,
1413 0x030005f9, 1401 0x0142b609,
1414 0x06137e01, 1402/* 0x066f: i2c_raise_scl_done */
1403 0xfcef1bf4,
1404/* 0x0673: i2c_start */
1405 0x7e00f840,
1406 0xf4000628,
1407 0x3c7e0d11,
1408 0x11f40006,
1409 0x2e0ef406,
1410/* 0x0684: i2c_start_rep */
1411 0xf47e0003,
1412 0x01030005,
1413 0x00060e7e,
1414 0xb60076bb,
1415 0x50f90465,
1416 0xbb046594,
1417 0x50bd0256,
1418 0xfc0475fd,
1419 0x06507e50,
1420 0x0464b600,
1421/* 0x06af: i2c_start_send */
1422 0x031d11f4,
1423 0x060e7e00,
1424 0x13884e00,
1425 0x0000587e,
1426 0xf47e0003,
1427 0x884e0005,
1428 0x00587e13,
1429/* 0x06c9: i2c_start_out */
1430/* 0x06cb: i2c_stop */
1431 0x0300f800,
1432 0x05f47e00,
1433 0x7e000300,
1434 0x4e00060e,
1435 0x587e03e8,
1436 0x01030000,
1437 0x0005f47e,
1438 0x7e13884e,
1439 0x03000058,
1440 0x060e7e01,
1441 0x13884e00,
1442 0x0000587e,
1443/* 0x06fa: i2c_bitw */
1444 0x0e7e00f8,
1445 0xe84e0006,
1446 0x00587e03,
1415 0x0076bb00, 1447 0x0076bb00,
1416 0xf90465b6, 1448 0xf90465b6,
1417 0x04659450, 1449 0x04659450,
1418 0xbd0256bb, 1450 0xbd0256bb,
1419 0x0475fd50, 1451 0x0475fd50,
1420 0x557e50fc, 1452 0x507e50fc,
1421 0x64b60006, 1453 0x64b60006,
1422 0x1d11f404, 1454 0x1711f404,
1423/* 0x06b4: i2c_start_send */ 1455 0x7e13884e,
1424 0x137e0003,
1425 0x884e0006,
1426 0x00587e13,
1427 0x7e000300,
1428 0x4e0005f9,
1429 0x587e1388,
1430/* 0x06ce: i2c_start_out */
1431 0x00f80000,
1432/* 0x06d0: i2c_stop */
1433 0xf97e0003,
1434 0x00030005,
1435 0x0006137e,
1436 0x7e03e84e,
1437 0x03000058, 1456 0x03000058,
1438 0x05f97e01, 1457 0x05f47e00,
1439 0x13884e00, 1458 0x13884e00,
1440 0x0000587e, 1459 0x0000587e,
1441 0x137e0103, 1460/* 0x0738: i2c_bitw_out */
1442 0x884e0006, 1461/* 0x073a: i2c_bitr */
1443 0x00587e13, 1462 0x010300f8,
1444/* 0x06ff: i2c_bitw */ 1463 0x00060e7e,
1445 0x7e00f800, 1464 0x7e03e84e,
1446 0x4e000613, 1465 0xbb000058,
1447 0x587e03e8, 1466 0x65b60076,
1448 0x76bb0000, 1467 0x9450f904,
1468 0x56bb0465,
1469 0xfd50bd02,
1470 0x50fc0475,
1471 0x0006507e,
1472 0xf40464b6,
1473 0x3c7e1a11,
1474 0x00030006,
1475 0x0005f47e,
1476 0x7e13884e,
1477 0xf0000058,
1478 0x31f4013c,
1479/* 0x077d: i2c_bitr_done */
1480/* 0x077f: i2c_get_byte */
1481 0x0500f801,
1482/* 0x0783: i2c_get_byte_next */
1483 0xb6080400,
1484 0x76bb0154,
1449 0x0465b600, 1485 0x0465b600,
1450 0x659450f9, 1486 0x659450f9,
1451 0x0256bb04, 1487 0x0256bb04,
1452 0x75fd50bd, 1488 0x75fd50bd,
1453 0x7e50fc04, 1489 0x7e50fc04,
1454 0xb6000655, 1490 0xb600073a,
1455 0x11f40464, 1491 0x11f40464,
1456 0x13884e17, 1492 0x0553fd2a,
1457 0x0000587e, 1493 0xf40142b6,
1458 0xf97e0003, 1494 0x0103d81b,
1459 0x884e0005,
1460 0x00587e13,
1461/* 0x073d: i2c_bitw_out */
1462/* 0x073f: i2c_bitr */
1463 0x0300f800,
1464 0x06137e01,
1465 0x03e84e00,
1466 0x0000587e,
1467 0xb60076bb, 1495 0xb60076bb,
1468 0x50f90465, 1496 0x50f90465,
1469 0xbb046594, 1497 0xbb046594,
1470 0x50bd0256, 1498 0x50bd0256,
1471 0xfc0475fd, 1499 0xfc0475fd,
1472 0x06557e50, 1500 0x06fa7e50,
1473 0x0464b600, 1501 0x0464b600,
1474 0x7e1a11f4, 1502/* 0x07cc: i2c_get_byte_done */
1475 0x03000641, 1503/* 0x07ce: i2c_put_byte */
1476 0x05f97e00, 1504 0x080400f8,
1477 0x13884e00, 1505/* 0x07d0: i2c_put_byte_next */
1478 0x0000587e, 1506 0xff0142b6,
1479 0xf4013cf0, 1507 0x76bb3854,
1480/* 0x0782: i2c_bitr_done */ 1508 0x0465b600,
1481 0x00f80131, 1509 0x659450f9,
1482/* 0x0784: i2c_get_byte */ 1510 0x0256bb04,
1483 0x08040005, 1511 0x75fd50bd,
1484/* 0x0788: i2c_get_byte_next */ 1512 0x7e50fc04,
1485 0xbb0154b6, 1513 0xb60006fa,
1486 0x65b60076, 1514 0x11f40464,
1487 0x9450f904, 1515 0x0046b034,
1488 0x56bb0465, 1516 0xbbd81bf4,
1489 0xfd50bd02,
1490 0x50fc0475,
1491 0x00073f7e,
1492 0xf40464b6,
1493 0x53fd2a11,
1494 0x0142b605,
1495 0x03d81bf4,
1496 0x0076bb01,
1497 0xf90465b6,
1498 0x04659450,
1499 0xbd0256bb,
1500 0x0475fd50,
1501 0xff7e50fc,
1502 0x64b60006,
1503/* 0x07d1: i2c_get_byte_done */
1504/* 0x07d3: i2c_put_byte */
1505 0x0400f804,
1506/* 0x07d5: i2c_put_byte_next */
1507 0x0142b608,
1508 0xbb3854ff,
1509 0x65b60076, 1517 0x65b60076,
1510 0x9450f904, 1518 0x9450f904,
1511 0x56bb0465, 1519 0x56bb0465,
1512 0xfd50bd02, 1520 0xfd50bd02,
1513 0x50fc0475, 1521 0x50fc0475,
1514 0x0006ff7e, 1522 0x00073a7e,
1515 0xf40464b6, 1523 0xf40464b6,
1516 0x46b03411, 1524 0x76bb0f11,
1517 0xd81bf400, 1525 0x0136b000,
1526 0xf4061bf4,
1527/* 0x0826: i2c_put_byte_done */
1528 0x00f80132,
1529/* 0x0828: i2c_addr */
1518 0xb60076bb, 1530 0xb60076bb,
1519 0x50f90465, 1531 0x50f90465,
1520 0xbb046594, 1532 0xbb046594,
1521 0x50bd0256, 1533 0x50bd0256,
1522 0xfc0475fd, 1534 0xfc0475fd,
1523 0x073f7e50, 1535 0x06737e50,
1524 0x0464b600, 1536 0x0464b600,
1525 0xbb0f11f4, 1537 0xe72911f4,
1526 0x36b00076, 1538 0xb6012ec3,
1527 0x061bf401, 1539 0x53fd0134,
1528/* 0x082b: i2c_put_byte_done */ 1540 0x0076bb05,
1529 0xf80132f4,
1530/* 0x082d: i2c_addr */
1531 0x0076bb00,
1532 0xf90465b6, 1541 0xf90465b6,
1533 0x04659450, 1542 0x04659450,
1534 0xbd0256bb, 1543 0xbd0256bb,
1535 0x0475fd50, 1544 0x0475fd50,
1536 0x787e50fc, 1545 0xce7e50fc,
1537 0x64b60006, 1546 0x64b60007,
1538 0x2911f404, 1547/* 0x086d: i2c_addr_done */
1539 0x012ec3e7, 1548/* 0x086f: i2c_acquire_addr */
1540 0xfd0134b6, 1549 0xc700f804,
1541 0x76bb0553, 1550 0xe4b6f8ce,
1542 0x0465b600, 1551 0x14e0b705,
1543 0x659450f9, 1552/* 0x087b: i2c_acquire */
1544 0x0256bb04, 1553 0x7e00f8d0,
1545 0x75fd50bd, 1554 0x7e00086f,
1546 0x7e50fc04, 1555 0xf0000004,
1547 0xb60007d3, 1556 0x2d7e03d9,
1548/* 0x0872: i2c_addr_done */ 1557 0x00f80000,
1549 0x00f80464, 1558/* 0x088c: i2c_release */
1550/* 0x0874: i2c_acquire_addr */ 1559 0x00086f7e,
1551 0xb6f8cec7,
1552 0xe0b705e4,
1553 0x00f8d014,
1554/* 0x0880: i2c_acquire */
1555 0x0008747e,
1556 0x0000047e, 1560 0x0000047e,
1557 0x7e03d9f0, 1561 0x7e03daf0,
1558 0xf800002d, 1562 0xf800002d,
1559/* 0x0891: i2c_release */ 1563/* 0x089d: i2c_recv */
1560 0x08747e00, 1564 0x0132f400,
1561 0x00047e00, 1565 0xb6f8c1c7,
1562 0x03daf000, 1566 0x16b00214,
1563 0x00002d7e, 1567 0x341ff528,
1564/* 0x08a2: i2c_recv */ 1568 0xf413b801,
1565 0x32f400f8, 1569 0x3298000c,
1566 0xf8c1c701, 1570 0xcc13b800,
1567 0xb00214b6, 1571 0x3198000c,
1568 0x1ff52816, 1572 0x0231f400,
1569 0x13b80134, 1573 0xe0f9d0f9,
1570 0x98000cf4, 1574 0x00d6d0f9,
1571 0x13b80032, 1575 0x92100000,
1572 0x98000ccc, 1576 0x76bb0167,
1573 0x31f40031,
1574 0xf9d0f902,
1575 0xd6d0f9e0,
1576 0x10000000,
1577 0xbb016792,
1578 0x65b60076,
1579 0x9450f904,
1580 0x56bb0465,
1581 0xfd50bd02,
1582 0x50fc0475,
1583 0x0008807e,
1584 0xfc0464b6,
1585 0x00d6b0d0,
1586 0x00b01bf5,
1587 0x76bb0005,
1588 0x0465b600, 1577 0x0465b600,
1589 0x659450f9, 1578 0x659450f9,
1590 0x0256bb04, 1579 0x0256bb04,
1591 0x75fd50bd, 1580 0x75fd50bd,
1592 0x7e50fc04, 1581 0x7e50fc04,
1593 0xb600082d, 1582 0xb600087b,
1594 0x11f50464, 1583 0xd0fc0464,
1595 0xc5c700cc, 1584 0xf500d6b0,
1596 0x0076bbe0, 1585 0x0500b01b,
1597 0xf90465b6, 1586 0x0076bb00,
1598 0x04659450,
1599 0xbd0256bb,
1600 0x0475fd50,
1601 0xd37e50fc,
1602 0x64b60007,
1603 0xa911f504,
1604 0xbb010500,
1605 0x65b60076,
1606 0x9450f904,
1607 0x56bb0465,
1608 0xfd50bd02,
1609 0x50fc0475,
1610 0x00082d7e,
1611 0xf50464b6,
1612 0xbb008711,
1613 0x65b60076,
1614 0x9450f904,
1615 0x56bb0465,
1616 0xfd50bd02,
1617 0x50fc0475,
1618 0x0007847e,
1619 0xf40464b6,
1620 0x5bcb6711,
1621 0x0076bbe0,
1622 0xf90465b6, 1587 0xf90465b6,
1623 0x04659450, 1588 0x04659450,
1624 0xbd0256bb, 1589 0xbd0256bb,
1625 0x0475fd50, 1590 0x0475fd50,
1626 0xd07e50fc, 1591 0x287e50fc,
1627 0x64b60006, 1592 0x64b60008,
1628 0xbd5bb204, 1593 0xcc11f504,
1629 0x410ef474, 1594 0xe0c5c700,
1630/* 0x09a4: i2c_recv_not_rd08 */ 1595 0xb60076bb,
1631 0xf401d6b0, 1596 0x50f90465,
1632 0x00053b1b, 1597 0xbb046594,
1633 0x00082d7e, 1598 0x50bd0256,
1634 0xc73211f4, 1599 0xfc0475fd,
1635 0xd37ee0c5, 1600 0x07ce7e50,
1636 0x11f40007, 1601 0x0464b600,
1637 0x7e000528, 1602 0x00a911f5,
1638 0xf400082d, 1603 0x76bb0105,
1639 0xb5c71f11, 1604 0x0465b600,
1640 0x07d37ee0, 1605 0x659450f9,
1641 0x1511f400, 1606 0x0256bb04,
1642 0x0006d07e, 1607 0x75fd50bd,
1643 0xc5c774bd, 1608 0x7e50fc04,
1644 0x091bf408, 1609 0xb6000828,
1645 0xf40232f4, 1610 0x11f50464,
1646/* 0x09e2: i2c_recv_not_wr08 */ 1611 0x76bb0087,
1647/* 0x09e2: i2c_recv_done */ 1612 0x0465b600,
1648 0xcec7030e, 1613 0x659450f9,
1649 0x08917ef8, 1614 0x0256bb04,
1650 0xfce0fc00, 1615 0x75fd50bd,
1651 0x0912f4d0, 1616 0x7e50fc04,
1652 0x9f7e7cb2, 1617 0xb600077f,
1653/* 0x09f6: i2c_recv_exit */ 1618 0x11f40464,
1654 0x00f80002, 1619 0xe05bcb67,
1655/* 0x09f8: i2c_init */ 1620 0xb60076bb,
1656/* 0x09fa: test_recv */ 1621 0x50f90465,
1657 0x584100f8, 1622 0xbb046594,
1658 0x0011cf04, 1623 0x50bd0256,
1659 0x400110b6, 1624 0xfc0475fd,
1660 0x01f60458, 1625 0x06cb7e50,
1661 0xde04bd00, 1626 0x0464b600,
1662 0x134fd900, 1627 0x74bd5bb2,
1663 0x0001de7e, 1628/* 0x099f: i2c_recv_not_rd08 */
1664/* 0x0a16: test_init */ 1629 0xb0410ef4,
1665 0x004e00f8, 1630 0x1bf401d6,
1666 0x01de7e08, 1631 0x7e00053b,
1667/* 0x0a1f: idle_recv */ 1632 0xf4000828,
1633 0xc5c73211,
1634 0x07ce7ee0,
1635 0x2811f400,
1636 0x287e0005,
1637 0x11f40008,
1638 0xe0b5c71f,
1639 0x0007ce7e,
1640 0x7e1511f4,
1641 0xbd0006cb,
1642 0x08c5c774,
1643 0xf4091bf4,
1644 0x0ef40232,
1645/* 0x09dd: i2c_recv_not_wr08 */
1646/* 0x09dd: i2c_recv_done */
1647 0xf8cec703,
1648 0x00088c7e,
1649 0xd0fce0fc,
1650 0xb20912f4,
1651 0x029f7e7c,
1652/* 0x09f1: i2c_recv_exit */
1653/* 0x09f3: i2c_init */
1668 0xf800f800, 1654 0xf800f800,
1669/* 0x0a21: idle */ 1655/* 0x09f5: test_recv */
1670 0x0031f400, 1656 0x04584100,
1671 0xcf045441, 1657 0xb60011cf,
1672 0x10b60011, 1658 0x58400110,
1673 0x04544001, 1659 0x0001f604,
1674 0xbd0001f6, 1660 0x00de04bd,
1675/* 0x0a35: idle_loop */ 1661 0x7e134fd9,
1676 0xf4580104, 1662 0xf80001de,
1677/* 0x0a3a: idle_proc */ 1663/* 0x0a11: test_init */
1678/* 0x0a3a: idle_proc_exec */ 1664 0x08004e00,
1679 0x10f90232, 1665 0x0001de7e,
1680 0xa87e1eb2, 1666/* 0x0a1a: idle_recv */
1681 0x10fc0002, 1667 0x00f800f8,
1682 0xf40911f4, 1668/* 0x0a1c: idle */
1683 0x0ef40231, 1669 0x410031f4,
1684/* 0x0a4d: idle_proc_next */ 1670 0x11cf0454,
1685 0x5810b6f0, 1671 0x0110b600,
1686 0x1bf41fa6, 1672 0xf6045440,
1687 0xe002f4e8, 1673 0x04bd0001,
1688 0xf40028f4, 1674/* 0x0a30: idle_loop */
1689 0x0000c60e, 1675 0x32f45801,
1676/* 0x0a35: idle_proc */
1677/* 0x0a35: idle_proc_exec */
1678 0xb210f902,
1679 0x02a87e1e,
1680 0xf410fc00,
1681 0x31f40911,
1682 0xf00ef402,
1683/* 0x0a48: idle_proc_next */
1684 0xa65810b6,
1685 0xe81bf41f,
1686 0xf4e002f4,
1687 0x0ef40028,
1688 0x000000c6,
1689 0x00000000,
1690 0x00000000, 1690 0x00000000,
1691 0x00000000, 1691 0x00000000,
1692 0x00000000, 1692 0x00000000,
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gt215.fuc3.h b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gt215.fuc3.h
index 6a2572e8945a..defddf5957ee 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gt215.fuc3.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gt215.fuc3.h
@@ -47,8 +47,8 @@ static uint32_t gt215_pmu_data[] = {
47 0x00000000, 47 0x00000000,
48 0x00000000, 48 0x00000000,
49 0x584d454d, 49 0x584d454d,
50 0x0000083a, 50 0x00000833,
51 0x0000082c, 51 0x00000825,
52 0x00000000, 52 0x00000000,
53 0x00000000, 53 0x00000000,
54 0x00000000, 54 0x00000000,
@@ -69,8 +69,8 @@ static uint32_t gt215_pmu_data[] = {
69 0x00000000, 69 0x00000000,
70 0x00000000, 70 0x00000000,
71 0x46524550, 71 0x46524550,
72 0x0000083e, 72 0x00000837,
73 0x0000083c, 73 0x00000835,
74 0x00000000, 74 0x00000000,
75 0x00000000, 75 0x00000000,
76 0x00000000, 76 0x00000000,
@@ -91,8 +91,8 @@ static uint32_t gt215_pmu_data[] = {
91 0x00000000, 91 0x00000000,
92 0x00000000, 92 0x00000000,
93 0x5f433249, 93 0x5f433249,
94 0x00000c6e, 94 0x00000c67,
95 0x00000b11, 95 0x00000b0a,
96 0x00000000, 96 0x00000000,
97 0x00000000, 97 0x00000000,
98 0x00000000, 98 0x00000000,
@@ -113,8 +113,8 @@ static uint32_t gt215_pmu_data[] = {
113 0x00000000, 113 0x00000000,
114 0x00000000, 114 0x00000000,
115 0x54534554, 115 0x54534554,
116 0x00000c97, 116 0x00000c90,
117 0x00000c70, 117 0x00000c69,
118 0x00000000, 118 0x00000000,
119 0x00000000, 119 0x00000000,
120 0x00000000, 120 0x00000000,
@@ -135,8 +135,8 @@ static uint32_t gt215_pmu_data[] = {
135 0x00000000, 135 0x00000000,
136 0x00000000, 136 0x00000000,
137 0x454c4449, 137 0x454c4449,
138 0x00000ca3, 138 0x00000c9c,
139 0x00000ca1, 139 0x00000c9a,
140 0x00000000, 140 0x00000000,
141 0x00000000, 141 0x00000000,
142 0x00000000, 142 0x00000000,
@@ -234,22 +234,22 @@ static uint32_t gt215_pmu_data[] = {
234/* 0x037c: memx_func_next */ 234/* 0x037c: memx_func_next */
235 0x00000002, 235 0x00000002,
236 0x00000000, 236 0x00000000,
237 0x000005a0, 237 0x0000059f,
238 0x00000003, 238 0x00000003,
239 0x00000002, 239 0x00000002,
240 0x00000632, 240 0x0000062f,
241 0x00040004, 241 0x00040004,
242 0x00000000, 242 0x00000000,
243 0x0000064e, 243 0x0000064b,
244 0x00010005, 244 0x00010005,
245 0x00000000, 245 0x00000000,
246 0x0000066b, 246 0x00000668,
247 0x00010006, 247 0x00010006,
248 0x00000000, 248 0x00000000,
249 0x000005f0, 249 0x000005ef,
250 0x00000007, 250 0x00000007,
251 0x00000000, 251 0x00000000,
252 0x00000676, 252 0x00000673,
253/* 0x03c4: memx_func_tail */ 253/* 0x03c4: memx_func_tail */
254/* 0x03c4: memx_ts_start */ 254/* 0x03c4: memx_ts_start */
255 0x00000000, 255 0x00000000,
@@ -1305,560 +1305,560 @@ static uint32_t gt215_pmu_code[] = {
1305 0x67f102d7, 1305 0x67f102d7,
1306 0x63f1fffc, 1306 0x63f1fffc,
1307 0x76fdffff, 1307 0x76fdffff,
1308 0x0267f104, 1308 0x0267f004,
1309 0x0576fd00, 1309 0xf90576fd,
1310 0x70f980f9, 1310 0xfc70f980,
1311 0xe0fcd0fc, 1311 0xf4e0fcd0,
1312 0xf04021f4, 1312 0x67f04021,
1313 0xe007f104,
1314 0x0604b607,
1315 0xbd0006d0,
1316/* 0x0581: memx_func_enter_wait */
1317 0xc067f104,
1318 0x0664b607,
1319 0xf00066cf,
1320 0x0bf40464,
1321 0x2c67f0f3,
1322 0xcf0664b6,
1323 0x06800066,
1324/* 0x059f: memx_func_leave */
1325 0xf000f8f1,
1326 0x64b62c67,
1327 0x0066cf06,
1328 0xf0f20680,
1313 0x07f10467, 1329 0x07f10467,
1314 0x04b607e0, 1330 0x04b607e4,
1315 0x0006d006, 1331 0x0006d006,
1316/* 0x0582: memx_func_enter_wait */ 1332/* 0x05ba: memx_func_leave_wait */
1317 0x67f104bd, 1333 0x67f104bd,
1318 0x64b607c0, 1334 0x64b607c0,
1319 0x0066cf06, 1335 0x0066cf06,
1320 0xf40464f0, 1336 0xf40464f0,
1321 0x67f0f30b, 1337 0x87f1f31b,
1322 0x0664b62c, 1338 0x8eb91610,
1323 0x800066cf, 1339 0x0421f402,
1324 0x00f8f106, 1340 0xf102d7b9,
1325/* 0x05a0: memx_func_leave */ 1341 0xf1ffcc67,
1326 0xb62c67f0, 1342 0xfdffff63,
1327 0x66cf0664, 1343 0x80f90476,
1328 0xf2068000, 1344 0xd0fc70f9,
1329 0xf10467f0, 1345 0x21f4e0fc,
1330 0xb607e407, 1346/* 0x05ef: memx_func_wait_vblank */
1331 0x06d00604, 1347 0x9800f840,
1332/* 0x05bb: memx_func_leave_wait */ 1348 0x66b00016,
1333 0xf104bd00, 1349 0x120bf400,
1334 0xb607c067, 1350 0xf40166b0,
1335 0x66cf0664, 1351 0x0ef4060b,
1336 0x0464f000, 1352/* 0x0601: memx_func_wait_vblank_head1 */
1337 0xf1f31bf4, 1353 0x2077f02c,
1338 0xb9161087, 1354/* 0x0607: memx_func_wait_vblank_head0 */
1339 0x21f4028e, 1355 0xf0060ef4,
1340 0x02d7b904, 1356/* 0x060a: memx_func_wait_vblank_0 */
1341 0xffcc67f1, 1357 0x67f10877,
1342 0xffff63f1, 1358 0x64b607c4,
1343 0xf90476fd, 1359 0x0066cf06,
1344 0xfc70f980, 1360 0xf40467fd,
1345 0xf4e0fcd0, 1361/* 0x061a: memx_func_wait_vblank_1 */
1346 0x00f84021, 1362 0x67f1f31b,
1347/* 0x05f0: memx_func_wait_vblank */ 1363 0x64b607c4,
1348 0xb0001698, 1364 0x0066cf06,
1349 0x0bf40066, 1365 0xf40467fd,
1350 0x0166b013, 1366/* 0x062a: memx_func_wait_vblank_fini */
1351 0xf4060bf4, 1367 0x10b6f30b,
1352/* 0x0602: memx_func_wait_vblank_head1 */ 1368/* 0x062f: memx_func_wr32 */
1353 0x77f12e0e, 1369 0x9800f804,
1354 0x0ef40020, 1370 0x15980016,
1355/* 0x0609: memx_func_wait_vblank_head0 */ 1371 0x0810b601,
1356 0x0877f107, 1372 0x50f960f9,
1357/* 0x060d: memx_func_wait_vblank_0 */
1358 0xc467f100,
1359 0x0664b607,
1360 0xfd0066cf,
1361 0x1bf40467,
1362/* 0x061d: memx_func_wait_vblank_1 */
1363 0xc467f1f3,
1364 0x0664b607,
1365 0xfd0066cf,
1366 0x0bf40467,
1367/* 0x062d: memx_func_wait_vblank_fini */
1368 0x0410b6f3,
1369/* 0x0632: memx_func_wr32 */
1370 0x169800f8,
1371 0x01159800,
1372 0xf90810b6,
1373 0xfc50f960,
1374 0xf4e0fcd0,
1375 0x42b64021,
1376 0xe91bf402,
1377/* 0x064e: memx_func_wait */
1378 0x87f000f8,
1379 0x0684b62c,
1380 0x980088cf,
1381 0x1d98001e,
1382 0x021c9801,
1383 0xb6031b98,
1384 0x21f41010,
1385/* 0x066b: memx_func_delay */
1386 0x9800f8a3,
1387 0x10b6001e,
1388 0x7e21f404,
1389/* 0x0676: memx_func_train */
1390 0x57f100f8,
1391 0x77f10003,
1392 0x97f10000,
1393 0x93f00000,
1394 0x029eb970,
1395 0xb90421f4,
1396 0xe7f102d8,
1397 0x21f42710,
1398/* 0x0695: memx_func_train_loop_outer */
1399 0x0158e07e,
1400 0x0083f101,
1401 0xe097f102,
1402 0x1193f011,
1403 0x80f990f9,
1404 0xe0fcd0fc, 1373 0xe0fcd0fc,
1405 0xf94021f4, 1374 0xb64021f4,
1406 0x0067f150, 1375 0x1bf40242,
1407/* 0x06b5: memx_func_train_loop_inner */ 1376/* 0x064b: memx_func_wait */
1408 0x1187f100, 1377 0xf000f8e9,
1409 0x9068ff11, 1378 0x84b62c87,
1410 0xfd109894, 1379 0x0088cf06,
1411 0x97f10589, 1380 0x98001e98,
1412 0x93f00720, 1381 0x1c98011d,
1413 0xf990f910, 1382 0x031b9802,
1414 0xfcd0fc80, 1383 0xf41010b6,
1415 0x4021f4e0, 1384 0x00f8a321,
1416 0x008097f1, 1385/* 0x0668: memx_func_delay */
1417 0xb91093f0, 1386 0xb6001e98,
1418 0x21f4029e, 1387 0x21f40410,
1419 0x02d8b904, 1388/* 0x0673: memx_func_train */
1420 0xf92088c5, 1389 0xf000f87e,
1390 0x77f00357,
1391 0x0097f100,
1392 0x7093f000,
1393 0xf4029eb9,
1394 0xd8b90421,
1395 0x10e7f102,
1396 0x7e21f427,
1397/* 0x0690: memx_func_train_loop_outer */
1398 0x010158e0,
1399 0x020083f1,
1400 0x11e097f1,
1401 0xf91193f0,
1402 0xfc80f990,
1403 0xf4e0fcd0,
1404 0x50f94021,
1405/* 0x06af: memx_func_train_loop_inner */
1406 0xf10067f0,
1407 0xff111187,
1408 0x98949068,
1409 0x0589fd10,
1410 0x072097f1,
1411 0xf91093f0,
1421 0xfc80f990, 1412 0xfc80f990,
1422 0xf4e0fcd0, 1413 0xf4e0fcd0,
1423 0x97f14021, 1414 0x97f14021,
1424 0x93f0053c, 1415 0x93f00080,
1425 0x0287f110, 1416 0x029eb910,
1426 0x0083f130, 1417 0xb90421f4,
1427 0xf990f980, 1418 0x88c502d8,
1419 0xf990f920,
1428 0xfcd0fc80, 1420 0xfcd0fc80,
1429 0x4021f4e0, 1421 0x4021f4e0,
1430 0x0560e7f1, 1422 0x053c97f1,
1431 0xf110e3f0, 1423 0xf11093f0,
1432 0xf10000d7, 1424 0xf1300287,
1433 0x908000d3, 1425 0xf9800083,
1434 0xb7f100dc, 1426 0xfc80f990,
1435 0xb3f08480,
1436 0xa321f41e,
1437 0x000057f1,
1438 0xffff97f1,
1439 0x830093f1,
1440/* 0x0734: memx_func_train_loop_4x */
1441 0x0080a7f1,
1442 0xb910a3f0,
1443 0x21f402ae,
1444 0x02d8b904,
1445 0xffdfb7f1,
1446 0xffffb3f1,
1447 0xf9048bfd,
1448 0xfc80f9a0,
1449 0xf4e0fcd0, 1427 0xf4e0fcd0,
1450 0xa7f14021, 1428 0xe7f14021,
1451 0xa3f0053c, 1429 0xe3f00560,
1452 0x0287f110, 1430 0x00d7f110,
1453 0x0083f130, 1431 0x00d3f100,
1454 0xf9a0f980, 1432 0x00dc9080,
1455 0xfcd0fc80, 1433 0x8480b7f1,
1456 0x4021f4e0, 1434 0xf41eb3f0,
1457 0x0560e7f1, 1435 0x57f0a321,
1458 0xf110e3f0, 1436 0xff97f100,
1459 0xf10000d7, 1437 0x0093f1ff,
1460 0xb98000d3, 1438/* 0x072d: memx_func_train_loop_4x */
1461 0xb7f102dc, 1439 0x80a7f183,
1462 0xb3f02710, 1440 0x10a3f000,
1463 0xa321f400, 1441 0xf402aeb9,
1464 0xf402eeb9, 1442 0xd8b90421,
1465 0xddb90421, 1443 0xdfb7f102,
1466 0x949dff02, 1444 0xffb3f1ff,
1445 0x048bfdff,
1446 0x80f9a0f9,
1447 0xe0fcd0fc,
1448 0xf14021f4,
1449 0xf0053ca7,
1450 0x87f110a3,
1451 0x83f13002,
1452 0xa0f98000,
1453 0xd0fc80f9,
1454 0x21f4e0fc,
1455 0x60e7f140,
1456 0x10e3f005,
1457 0x0000d7f1,
1458 0x8000d3f1,
1459 0xf102dcb9,
1460 0xf02710b7,
1461 0x21f400b3,
1462 0x02eeb9a3,
1463 0xb90421f4,
1464 0x9dff02dd,
1465 0x0150b694,
1466 0xf4045670,
1467 0x7aa0921e,
1468 0xa9800bcc,
1469 0x0160b600,
1470 0x700470b6,
1471 0x1ef51066,
1472 0x50fcff01,
1467 0x700150b6, 1473 0x700150b6,
1468 0x1ef40456, 1474 0x1ef50756,
1469 0xcc7aa092, 1475 0x00f8fed6,
1470 0x00a9800b, 1476/* 0x07c0: memx_exec */
1471 0xb60160b6, 1477 0xd0f9e0f9,
1472 0x66700470, 1478 0xb902c1b9,
1473 0x001ef510, 1479/* 0x07ca: memx_exec_next */
1474 0xb650fcff, 1480 0x139802b2,
1475 0x56700150, 1481 0x0410b600,
1476 0xd41ef507, 1482 0x01f034e7,
1477/* 0x07c7: memx_exec */ 1483 0x01e033e7,
1478 0xf900f8fe, 1484 0xf00132b6,
1479 0xb9d0f9e0, 1485 0x35980c30,
1480 0xb2b902c1, 1486 0xb855f9de,
1481/* 0x07d1: memx_exec_next */ 1487 0x1ef40612,
1482 0x00139802, 1488 0xf10b98e4,
1483 0xe70410b6, 1489 0xbbf20c98,
1484 0xe701f034, 1490 0xb7f102cb,
1485 0xb601e033, 1491 0xb4b607c4,
1486 0x30f00132, 1492 0x00bbcf06,
1487 0xde35980c, 1493 0xe0fcd0fc,
1488 0x12b855f9, 1494 0x033621f5,
1489 0xe41ef406, 1495/* 0x0806: memx_info */
1490 0x98f10b98, 1496 0xc67000f8,
1491 0xcbbbf20c, 1497 0x0e0bf401,
1492 0xc4b7f102, 1498/* 0x080c: memx_info_data */
1493 0x06b4b607, 1499 0x03ccc7f1,
1494 0xfc00bbcf, 1500 0x0800b7f1,
1495 0xf5e0fcd0, 1501/* 0x0817: memx_info_train */
1502 0xf10b0ef4,
1503 0xf10bccc7,
1504/* 0x081f: memx_info_send */
1505 0xf50100b7,
1496 0xf8033621, 1506 0xf8033621,
1497/* 0x080d: memx_info */ 1507/* 0x0825: memx_recv */
1498 0x01c67000, 1508 0x01d6b000,
1499/* 0x0813: memx_info_data */ 1509 0xb0980bf4,
1500 0xf10e0bf4, 1510 0x0bf400d6,
1501 0xf103ccc7, 1511/* 0x0833: memx_init */
1502 0xf40800b7, 1512 0xf800f8d8,
1503/* 0x081e: memx_info_train */ 1513/* 0x0835: perf_recv */
1504 0xc7f10b0e, 1514/* 0x0837: perf_init */
1505 0xb7f10bcc, 1515 0xf800f800,
1506/* 0x0826: memx_info_send */ 1516/* 0x0839: i2c_drive_scl */
1507 0x21f50100, 1517 0x0036b000,
1508 0x00f80336, 1518 0xf1110bf4,
1509/* 0x082c: memx_recv */ 1519 0xb607e007,
1510 0xf401d6b0, 1520 0x01d00604,
1511 0xd6b0980b, 1521 0xf804bd00,
1512 0xd80bf400, 1522/* 0x084d: i2c_drive_scl_lo */
1513/* 0x083a: memx_init */ 1523 0xe407f100,
1514 0x00f800f8, 1524 0x0604b607,
1515/* 0x083c: perf_recv */ 1525 0xbd0001d0,
1516/* 0x083e: perf_init */ 1526/* 0x085b: i2c_drive_sda */
1517 0x00f800f8, 1527 0xb000f804,
1518/* 0x0840: i2c_drive_scl */ 1528 0x0bf40036,
1519 0xf40036b0, 1529 0xe007f111,
1520 0x07f1110b, 1530 0x0604b607,
1521 0x04b607e0, 1531 0xbd0002d0,
1522 0x0001d006, 1532/* 0x086f: i2c_drive_sda_lo */
1523 0x00f804bd, 1533 0xf100f804,
1524/* 0x0854: i2c_drive_scl_lo */ 1534 0xb607e407,
1525 0x07e407f1, 1535 0x02d00604,
1526 0xd00604b6, 1536 0xf804bd00,
1527 0x04bd0001, 1537/* 0x087d: i2c_sense_scl */
1528/* 0x0862: i2c_drive_sda */ 1538 0x0132f400,
1529 0x36b000f8, 1539 0x07c437f1,
1530 0x110bf400, 1540 0xcf0634b6,
1531 0x07e007f1, 1541 0x31fd0033,
1532 0xd00604b6, 1542 0x060bf404,
1533 0x04bd0002, 1543/* 0x0893: i2c_sense_scl_done */
1534/* 0x0876: i2c_drive_sda_lo */ 1544 0xf80131f4,
1535 0x07f100f8, 1545/* 0x0895: i2c_sense_sda */
1536 0x04b607e4, 1546 0x0132f400,
1537 0x0002d006, 1547 0x07c437f1,
1538 0x00f804bd, 1548 0xcf0634b6,
1539/* 0x0884: i2c_sense_scl */ 1549 0x32fd0033,
1540 0xf10132f4, 1550 0x060bf404,
1541 0xb607c437, 1551/* 0x08ab: i2c_sense_sda_done */
1542 0x33cf0634, 1552 0xf80131f4,
1543 0x0431fd00, 1553/* 0x08ad: i2c_raise_scl */
1544 0xf4060bf4, 1554 0xf140f900,
1545/* 0x089a: i2c_sense_scl_done */ 1555 0xf0089847,
1546 0x00f80131, 1556 0x21f50137,
1547/* 0x089c: i2c_sense_sda */ 1557/* 0x08ba: i2c_raise_scl_wait */
1548 0xf10132f4, 1558 0xe7f10839,
1549 0xb607c437, 1559 0x21f403e8,
1550 0x33cf0634, 1560 0x7d21f57e,
1551 0x0432fd00, 1561 0x0901f408,
1552 0xf4060bf4, 1562 0xf40142b6,
1553/* 0x08b2: i2c_sense_sda_done */ 1563/* 0x08ce: i2c_raise_scl_done */
1554 0x00f80131, 1564 0x40fcef1b,
1555/* 0x08b4: i2c_raise_scl */ 1565/* 0x08d2: i2c_start */
1556 0x47f140f9, 1566 0x21f500f8,
1557 0x37f00898, 1567 0x11f4087d,
1558 0x4021f501, 1568 0x9521f50d,
1559/* 0x08c1: i2c_raise_scl_wait */ 1569 0x0611f408,
1570/* 0x08e3: i2c_start_rep */
1571 0xf0300ef4,
1572 0x21f50037,
1573 0x37f00839,
1574 0x5b21f501,
1575 0x0076bb08,
1576 0xf90465b6,
1577 0x04659450,
1578 0xbd0256bb,
1579 0x0475fd50,
1580 0x21f550fc,
1581 0x64b608ad,
1582 0x1f11f404,
1583/* 0x0910: i2c_start_send */
1584 0xf50037f0,
1585 0xf1085b21,
1586 0xf41388e7,
1587 0x37f07e21,
1588 0x3921f500,
1589 0x88e7f108,
1590 0x7e21f413,
1591/* 0x092c: i2c_start_out */
1592/* 0x092e: i2c_stop */
1593 0x37f000f8,
1594 0x3921f500,
1595 0x0037f008,
1596 0x085b21f5,
1597 0x03e8e7f1,
1598 0xf07e21f4,
1599 0x21f50137,
1600 0xe7f10839,
1601 0x21f41388,
1602 0x0137f07e,
1603 0x085b21f5,
1604 0x1388e7f1,
1605 0xf87e21f4,
1606/* 0x0961: i2c_bitw */
1607 0x5b21f500,
1560 0xe8e7f108, 1608 0xe8e7f108,
1561 0x7e21f403, 1609 0x7e21f403,
1562 0x088421f5,
1563 0xb60901f4,
1564 0x1bf40142,
1565/* 0x08d5: i2c_raise_scl_done */
1566 0xf840fcef,
1567/* 0x08d9: i2c_start */
1568 0x8421f500,
1569 0x0d11f408,
1570 0x089c21f5,
1571 0xf40611f4,
1572/* 0x08ea: i2c_start_rep */
1573 0x37f0300e,
1574 0x4021f500,
1575 0x0137f008,
1576 0x086221f5,
1577 0xb60076bb, 1610 0xb60076bb,
1578 0x50f90465, 1611 0x50f90465,
1579 0xbb046594, 1612 0xbb046594,
1580 0x50bd0256, 1613 0x50bd0256,
1581 0xfc0475fd, 1614 0xfc0475fd,
1582 0xb421f550, 1615 0xad21f550,
1583 0x0464b608, 1616 0x0464b608,
1584/* 0x0917: i2c_start_send */ 1617 0xf11811f4,
1585 0xf01f11f4, 1618 0xf41388e7,
1586 0x21f50037,
1587 0xe7f10862,
1588 0x21f41388,
1589 0x0037f07e,
1590 0x084021f5,
1591 0x1388e7f1,
1592/* 0x0933: i2c_start_out */
1593 0xf87e21f4,
1594/* 0x0935: i2c_stop */
1595 0x0037f000,
1596 0x084021f5,
1597 0xf50037f0,
1598 0xf1086221,
1599 0xf403e8e7,
1600 0x37f07e21, 1619 0x37f07e21,
1601 0x4021f501, 1620 0x3921f500,
1602 0x88e7f108, 1621 0x88e7f108,
1603 0x7e21f413, 1622 0x7e21f413,
1604 0xf50137f0, 1623/* 0x09a0: i2c_bitw_out */
1605 0xf1086221, 1624/* 0x09a2: i2c_bitr */
1606 0xf41388e7, 1625 0x37f000f8,
1607 0x00f87e21, 1626 0x5b21f501,
1608/* 0x0968: i2c_bitw */ 1627 0xe8e7f108,
1609 0x086221f5, 1628 0x7e21f403,
1610 0x03e8e7f1, 1629 0xb60076bb,
1611 0xbb7e21f4, 1630 0x50f90465,
1612 0x65b60076, 1631 0xbb046594,
1613 0x9450f904, 1632 0x50bd0256,
1614 0x56bb0465, 1633 0xfc0475fd,
1615 0xfd50bd02, 1634 0xad21f550,
1616 0x50fc0475, 1635 0x0464b608,
1617 0x08b421f5, 1636 0xf51b11f4,
1618 0xf40464b6, 1637 0xf0089521,
1619 0xe7f11811, 1638 0x21f50037,
1639 0xe7f10839,
1620 0x21f41388, 1640 0x21f41388,
1621 0x0037f07e, 1641 0x013cf07e,
1622 0x084021f5, 1642/* 0x09e7: i2c_bitr_done */
1623 0x1388e7f1, 1643 0xf80131f4,
1624/* 0x09a7: i2c_bitw_out */ 1644/* 0x09e9: i2c_get_byte */
1625 0xf87e21f4, 1645 0x0057f000,
1626/* 0x09a9: i2c_bitr */ 1646/* 0x09ef: i2c_get_byte_next */
1627 0x0137f000, 1647 0xb60847f0,
1628 0x086221f5, 1648 0x76bb0154,
1629 0x03e8e7f1, 1649 0x0465b600,
1630 0xbb7e21f4, 1650 0x659450f9,
1631 0x65b60076, 1651 0x0256bb04,
1632 0x9450f904, 1652 0x75fd50bd,
1633 0x56bb0465, 1653 0xf550fc04,
1634 0xfd50bd02, 1654 0xb609a221,
1635 0x50fc0475, 1655 0x11f40464,
1636 0x08b421f5, 1656 0x0553fd2b,
1637 0xf40464b6, 1657 0xf40142b6,
1638 0x21f51b11, 1658 0x37f0d81b,
1639 0x37f0089c,
1640 0x4021f500,
1641 0x88e7f108,
1642 0x7e21f413,
1643 0xf4013cf0,
1644/* 0x09ee: i2c_bitr_done */
1645 0x00f80131,
1646/* 0x09f0: i2c_get_byte */
1647 0xf00057f0,
1648/* 0x09f6: i2c_get_byte_next */
1649 0x54b60847,
1650 0x0076bb01, 1659 0x0076bb01,
1651 0xf90465b6, 1660 0xf90465b6,
1652 0x04659450, 1661 0x04659450,
1653 0xbd0256bb, 1662 0xbd0256bb,
1654 0x0475fd50, 1663 0x0475fd50,
1655 0x21f550fc, 1664 0x21f550fc,
1656 0x64b609a9, 1665 0x64b60961,
1657 0x2b11f404, 1666/* 0x0a39: i2c_get_byte_done */
1658 0xb60553fd, 1667/* 0x0a3b: i2c_put_byte */
1659 0x1bf40142, 1668 0xf000f804,
1660 0x0137f0d8, 1669/* 0x0a3e: i2c_put_byte_next */
1661 0xb60076bb, 1670 0x42b60847,
1662 0x50f90465, 1671 0x3854ff01,
1663 0xbb046594,
1664 0x50bd0256,
1665 0xfc0475fd,
1666 0x6821f550,
1667 0x0464b609,
1668/* 0x0a40: i2c_get_byte_done */
1669/* 0x0a42: i2c_put_byte */
1670 0x47f000f8,
1671/* 0x0a45: i2c_put_byte_next */
1672 0x0142b608,
1673 0xbb3854ff,
1674 0x65b60076,
1675 0x9450f904,
1676 0x56bb0465,
1677 0xfd50bd02,
1678 0x50fc0475,
1679 0x096821f5,
1680 0xf40464b6,
1681 0x46b03411,
1682 0xd81bf400,
1683 0xb60076bb, 1672 0xb60076bb,
1684 0x50f90465, 1673 0x50f90465,
1685 0xbb046594, 1674 0xbb046594,
1686 0x50bd0256, 1675 0x50bd0256,
1687 0xfc0475fd, 1676 0xfc0475fd,
1688 0xa921f550, 1677 0x6121f550,
1689 0x0464b609, 1678 0x0464b609,
1690 0xbb0f11f4, 1679 0xb03411f4,
1691 0x36b00076, 1680 0x1bf40046,
1692 0x061bf401, 1681 0x0076bbd8,
1693/* 0x0a9b: i2c_put_byte_done */
1694 0xf80132f4,
1695/* 0x0a9d: i2c_addr */
1696 0x0076bb00,
1697 0xf90465b6, 1682 0xf90465b6,
1698 0x04659450, 1683 0x04659450,
1699 0xbd0256bb, 1684 0xbd0256bb,
1700 0x0475fd50, 1685 0x0475fd50,
1701 0x21f550fc, 1686 0x21f550fc,
1702 0x64b608d9, 1687 0x64b609a2,
1703 0x2911f404, 1688 0x0f11f404,
1704 0x012ec3e7, 1689 0xb00076bb,
1705 0xfd0134b6, 1690 0x1bf40136,
1706 0x76bb0553, 1691 0x0132f406,
1692/* 0x0a94: i2c_put_byte_done */
1693/* 0x0a96: i2c_addr */
1694 0x76bb00f8,
1707 0x0465b600, 1695 0x0465b600,
1708 0x659450f9, 1696 0x659450f9,
1709 0x0256bb04, 1697 0x0256bb04,
1710 0x75fd50bd, 1698 0x75fd50bd,
1711 0xf550fc04, 1699 0xf550fc04,
1712 0xb60a4221, 1700 0xb608d221,
1713/* 0x0ae2: i2c_addr_done */ 1701 0x11f40464,
1714 0x00f80464, 1702 0x2ec3e729,
1715/* 0x0ae4: i2c_acquire_addr */ 1703 0x0134b601,
1716 0xb6f8cec7, 1704 0xbb0553fd,
1717 0xe0b702e4,
1718 0xee980d1c,
1719/* 0x0af3: i2c_acquire */
1720 0xf500f800,
1721 0xf40ae421,
1722 0xd9f00421,
1723 0x4021f403,
1724/* 0x0b02: i2c_release */
1725 0x21f500f8,
1726 0x21f40ae4,
1727 0x03daf004,
1728 0xf84021f4,
1729/* 0x0b11: i2c_recv */
1730 0x0132f400,
1731 0xb6f8c1c7,
1732 0x16b00214,
1733 0x3a1ff528,
1734 0xf413a001,
1735 0x0032980c,
1736 0x0ccc13a0,
1737 0xf4003198,
1738 0xd0f90231,
1739 0xd0f9e0f9,
1740 0x000067f1,
1741 0x100063f1,
1742 0xbb016792,
1743 0x65b60076, 1705 0x65b60076,
1744 0x9450f904, 1706 0x9450f904,
1745 0x56bb0465, 1707 0x56bb0465,
1746 0xfd50bd02, 1708 0xfd50bd02,
1747 0x50fc0475, 1709 0x50fc0475,
1748 0x0af321f5, 1710 0x0a3b21f5,
1749 0xfc0464b6, 1711/* 0x0adb: i2c_addr_done */
1750 0x00d6b0d0, 1712 0xf80464b6,
1751 0x00b31bf5, 1713/* 0x0add: i2c_acquire_addr */
1752 0xbb0057f0, 1714 0xf8cec700,
1715 0xb702e4b6,
1716 0x980d1ce0,
1717 0x00f800ee,
1718/* 0x0aec: i2c_acquire */
1719 0x0add21f5,
1720 0xf00421f4,
1721 0x21f403d9,
1722/* 0x0afb: i2c_release */
1723 0xf500f840,
1724 0xf40add21,
1725 0xdaf00421,
1726 0x4021f403,
1727/* 0x0b0a: i2c_recv */
1728 0x32f400f8,
1729 0xf8c1c701,
1730 0xb00214b6,
1731 0x1ff52816,
1732 0x13a0013a,
1733 0x32980cf4,
1734 0xcc13a000,
1735 0x0031980c,
1736 0xf90231f4,
1737 0xf9e0f9d0,
1738 0x0067f1d0,
1739 0x0063f100,
1740 0x01679210,
1741 0xb60076bb,
1742 0x50f90465,
1743 0xbb046594,
1744 0x50bd0256,
1745 0xfc0475fd,
1746 0xec21f550,
1747 0x0464b60a,
1748 0xd6b0d0fc,
1749 0xb31bf500,
1750 0x0057f000,
1751 0xb60076bb,
1752 0x50f90465,
1753 0xbb046594,
1754 0x50bd0256,
1755 0xfc0475fd,
1756 0x9621f550,
1757 0x0464b60a,
1758 0x00d011f5,
1759 0xbbe0c5c7,
1753 0x65b60076, 1760 0x65b60076,
1754 0x9450f904, 1761 0x9450f904,
1755 0x56bb0465, 1762 0x56bb0465,
1756 0xfd50bd02, 1763 0xfd50bd02,
1757 0x50fc0475, 1764 0x50fc0475,
1758 0x0a9d21f5, 1765 0x0a3b21f5,
1759 0xf50464b6, 1766 0xf50464b6,
1760 0xc700d011, 1767 0xf000ad11,
1761 0x76bbe0c5, 1768 0x76bb0157,
1762 0x0465b600, 1769 0x0465b600,
1763 0x659450f9, 1770 0x659450f9,
1764 0x0256bb04, 1771 0x0256bb04,
1765 0x75fd50bd, 1772 0x75fd50bd,
1766 0xf550fc04, 1773 0xf550fc04,
1767 0xb60a4221, 1774 0xb60a9621,
1768 0x11f50464, 1775 0x11f50464,
1769 0x57f000ad, 1776 0x76bb008a,
1770 0x0076bb01, 1777 0x0465b600,
1771 0xf90465b6, 1778 0x659450f9,
1772 0x04659450, 1779 0x0256bb04,
1773 0xbd0256bb, 1780 0x75fd50bd,
1774 0x0475fd50, 1781 0xf550fc04,
1775 0x21f550fc, 1782 0xb609e921,
1776 0x64b60a9d, 1783 0x11f40464,
1777 0x8a11f504, 1784 0xe05bcb6a,
1778 0x0076bb00, 1785 0xb60076bb,
1779 0xf90465b6, 1786 0x50f90465,
1780 0x04659450, 1787 0xbb046594,
1781 0xbd0256bb, 1788 0x50bd0256,
1782 0x0475fd50, 1789 0xfc0475fd,
1783 0x21f550fc, 1790 0x2e21f550,
1784 0x64b609f0, 1791 0x0464b609,
1785 0x6a11f404, 1792 0xbd025bb9,
1786 0xbbe05bcb, 1793 0x430ef474,
1787 0x65b60076, 1794/* 0x0c10: i2c_recv_not_rd08 */
1788 0x9450f904, 1795 0xf401d6b0,
1789 0x56bb0465, 1796 0x57f03d1b,
1790 0xfd50bd02, 1797 0x9621f500,
1791 0x50fc0475, 1798 0x3311f40a,
1792 0x093521f5, 1799 0xf5e0c5c7,
1793 0xb90464b6, 1800 0xf40a3b21,
1794 0x74bd025b, 1801 0x57f02911,
1795/* 0x0c17: i2c_recv_not_rd08 */ 1802 0x9621f500,
1796 0xb0430ef4, 1803 0x1f11f40a,
1797 0x1bf401d6, 1804 0xf5e0b5c7,
1798 0x0057f03d, 1805 0xf40a3b21,
1799 0x0a9d21f5, 1806 0x21f51511,
1800 0xc73311f4, 1807 0x74bd092e,
1801 0x21f5e0c5, 1808 0xf408c5c7,
1802 0x11f40a42, 1809 0x32f4091b,
1803 0x0057f029, 1810 0x030ef402,
1804 0x0a9d21f5, 1811/* 0x0c50: i2c_recv_not_wr08 */
1805 0xc71f11f4, 1812/* 0x0c50: i2c_recv_done */
1806 0x21f5e0b5, 1813 0xf5f8cec7,
1807 0x11f40a42, 1814 0xfc0afb21,
1808 0x3521f515, 1815 0xf4d0fce0,
1809 0xc774bd09, 1816 0x7cb90a12,
1810 0x1bf408c5, 1817 0x3621f502,
1811 0x0232f409, 1818/* 0x0c65: i2c_recv_exit */
1812/* 0x0c57: i2c_recv_not_wr08 */ 1819/* 0x0c67: i2c_init */
1813/* 0x0c57: i2c_recv_done */ 1820 0xf800f803,
1814 0xc7030ef4, 1821/* 0x0c69: test_recv */
1815 0x21f5f8ce, 1822 0xd817f100,
1816 0xe0fc0b02, 1823 0x0614b605,
1817 0x12f4d0fc, 1824 0xb60011cf,
1818 0x027cb90a, 1825 0x07f10110,
1819 0x033621f5, 1826 0x04b605d8,
1820/* 0x0c6c: i2c_recv_exit */ 1827 0x0001d006,
1821/* 0x0c6e: i2c_init */ 1828 0xe7f104bd,
1829 0xe3f1d900,
1830 0x21f5134f,
1831 0x00f80256,
1832/* 0x0c90: test_init */
1833 0x0800e7f1,
1834 0x025621f5,
1835/* 0x0c9a: idle_recv */
1822 0x00f800f8, 1836 0x00f800f8,
1823/* 0x0c70: test_recv */ 1837/* 0x0c9c: idle */
1824 0x05d817f1, 1838 0xf10031f4,
1825 0xcf0614b6, 1839 0xb605d417,
1826 0x10b60011, 1840 0x11cf0614,
1827 0xd807f101, 1841 0x0110b600,
1828 0x0604b605, 1842 0x05d407f1,
1829 0xbd0001d0, 1843 0xd00604b6,
1830 0x00e7f104, 1844 0x04bd0001,
1831 0x4fe3f1d9, 1845/* 0x0cb8: idle_loop */
1832 0x5621f513, 1846 0xf45817f0,
1833/* 0x0c97: test_init */ 1847/* 0x0cbe: idle_proc */
1834 0xf100f802, 1848/* 0x0cbe: idle_proc_exec */
1835 0xf50800e7, 1849 0x10f90232,
1836 0xf8025621, 1850 0xf5021eb9,
1837/* 0x0ca1: idle_recv */ 1851 0xfc033f21,
1838/* 0x0ca3: idle */ 1852 0x0911f410,
1839 0xf400f800, 1853 0xf40231f4,
1840 0x17f10031, 1854/* 0x0cd2: idle_proc_next */
1841 0x14b605d4, 1855 0x10b6ef0e,
1842 0x0011cf06, 1856 0x061fb858,
1843 0xf10110b6, 1857 0xf4e61bf4,
1844 0xb605d407, 1858 0x28f4dd02,
1845 0x01d00604, 1859 0xbb0ef400,
1846/* 0x0cbf: idle_loop */ 1860 0x00000000,
1847 0xf004bd00, 1861 0x00000000,
1848 0x32f45817,
1849/* 0x0cc5: idle_proc */
1850/* 0x0cc5: idle_proc_exec */
1851 0xb910f902,
1852 0x21f5021e,
1853 0x10fc033f,
1854 0xf40911f4,
1855 0x0ef40231,
1856/* 0x0cd9: idle_proc_next */
1857 0x5810b6ef,
1858 0xf4061fb8,
1859 0x02f4e61b,
1860 0x0028f4dd,
1861 0x00bb0ef4,
1862 0x00000000, 1862 0x00000000,
1863 0x00000000, 1863 0x00000000,
1864 0x00000000, 1864 0x00000000,
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/memx.fuc b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/memx.fuc
index ec03f9a4290b..1663bf943d77 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/memx.fuc
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/memx.fuc
@@ -82,15 +82,15 @@ memx_train_tail:
82// $r0 - zero 82// $r0 - zero
83memx_func_enter: 83memx_func_enter:
84#if NVKM_PPWR_CHIPSET == GT215 84#if NVKM_PPWR_CHIPSET == GT215
85 movw $r8 0x1610 85 mov $r8 0x1610
86 nv_rd32($r7, $r8) 86 nv_rd32($r7, $r8)
87 imm32($r6, 0xfffffffc) 87 imm32($r6, 0xfffffffc)
88 and $r7 $r6 88 and $r7 $r6
89 movw $r6 0x2 89 mov $r6 0x2
90 or $r7 $r6 90 or $r7 $r6
91 nv_wr32($r8, $r7) 91 nv_wr32($r8, $r7)
92#else 92#else
93 movw $r6 0x001620 93 mov $r6 0x001620
94 imm32($r7, ~0x00000aa2); 94 imm32($r7, ~0x00000aa2);
95 nv_rd32($r8, $r6) 95 nv_rd32($r8, $r6)
96 and $r8 $r7 96 and $r8 $r7
@@ -101,7 +101,7 @@ memx_func_enter:
101 and $r8 $r7 101 and $r8 $r7
102 nv_wr32($r6, $r8) 102 nv_wr32($r6, $r8)
103 103
104 movw $r6 0x0026f0 104 mov $r6 0x0026f0
105 nv_rd32($r8, $r6) 105 nv_rd32($r8, $r6)
106 and $r8 $r7 106 and $r8 $r7
107 nv_wr32($r6, $r8) 107 nv_wr32($r6, $r8)
@@ -136,19 +136,19 @@ memx_func_leave:
136 bra nz #memx_func_leave_wait 136 bra nz #memx_func_leave_wait
137 137
138#if NVKM_PPWR_CHIPSET == GT215 138#if NVKM_PPWR_CHIPSET == GT215
139 movw $r8 0x1610 139 mov $r8 0x1610
140 nv_rd32($r7, $r8) 140 nv_rd32($r7, $r8)
141 imm32($r6, 0xffffffcc) 141 imm32($r6, 0xffffffcc)
142 and $r7 $r6 142 and $r7 $r6
143 nv_wr32($r8, $r7) 143 nv_wr32($r8, $r7)
144#else 144#else
145 movw $r6 0x0026f0 145 mov $r6 0x0026f0
146 imm32($r7, 0x00000001) 146 imm32($r7, 0x00000001)
147 nv_rd32($r8, $r6) 147 nv_rd32($r8, $r6)
148 or $r8 $r7 148 or $r8 $r7
149 nv_wr32($r6, $r8) 149 nv_wr32($r6, $r8)
150 150
151 movw $r6 0x001620 151 mov $r6 0x001620
152 nv_rd32($r8, $r6) 152 nv_rd32($r8, $r6)
153 or $r8 $r7 153 or $r8 $r7
154 nv_wr32($r6, $r8) 154 nv_wr32($r6, $r8)
@@ -177,11 +177,11 @@ memx_func_wait_vblank:
177 bra #memx_func_wait_vblank_fini 177 bra #memx_func_wait_vblank_fini
178 178
179 memx_func_wait_vblank_head1: 179 memx_func_wait_vblank_head1:
180 movw $r7 0x20 180 mov $r7 0x20
181 bra #memx_func_wait_vblank_0 181 bra #memx_func_wait_vblank_0
182 182
183 memx_func_wait_vblank_head0: 183 memx_func_wait_vblank_head0:
184 movw $r7 0x8 184 mov $r7 0x8
185 185
186 memx_func_wait_vblank_0: 186 memx_func_wait_vblank_0:
187 nv_iord($r6, NV_PPWR_INPUT) 187 nv_iord($r6, NV_PPWR_INPUT)
@@ -273,13 +273,13 @@ memx_func_train:
273// $r5 - outer loop counter 273// $r5 - outer loop counter
274// $r6 - inner loop counter 274// $r6 - inner loop counter
275// $r7 - entry counter (#memx_train_head + $r7) 275// $r7 - entry counter (#memx_train_head + $r7)
276 movw $r5 0x3 276 mov $r5 0x3
277 movw $r7 0x0 277 mov $r7 0x0
278 278
279// Read random memory to wake up... things 279// Read random memory to wake up... things
280 imm32($r9, 0x700000) 280 imm32($r9, 0x700000)
281 nv_rd32($r8,$r9) 281 nv_rd32($r8,$r9)
282 movw $r14 0x2710 282 mov $r14 0x2710
283 call(nsec) 283 call(nsec)
284 284
285 memx_func_train_loop_outer: 285 memx_func_train_loop_outer:
@@ -289,9 +289,9 @@ memx_func_train:
289 nv_wr32($r9, $r8) 289 nv_wr32($r9, $r8)
290 push $r5 290 push $r5
291 291
292 movw $r6 0x0 292 mov $r6 0x0
293 memx_func_train_loop_inner: 293 memx_func_train_loop_inner:
294 movw $r8 0x1111 294 mov $r8 0x1111
295 mulu $r9 $r6 $r8 295 mulu $r9 $r6 $r8
296 shl b32 $r8 $r9 0x10 296 shl b32 $r8 $r9 0x10
297 or $r8 $r9 297 or $r8 $r9
@@ -315,7 +315,7 @@ memx_func_train:
315 315
316 // $r5 - inner inner loop counter 316 // $r5 - inner inner loop counter
317 // $r9 - result 317 // $r9 - result
318 movw $r5 0 318 mov $r5 0
319 imm32($r9, 0x8300ffff) 319 imm32($r9, 0x8300ffff)
320 memx_func_train_loop_4x: 320 memx_func_train_loop_4x:
321 imm32($r10, 0x100080) 321 imm32($r10, 0x100080)
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/Kbuild
index e698f4836521..ed08120eefe0 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/Kbuild
@@ -7,8 +7,10 @@ nvkm-y += nvkm/subdev/secboot/acr_r352.o
7nvkm-y += nvkm/subdev/secboot/acr_r361.o 7nvkm-y += nvkm/subdev/secboot/acr_r361.o
8nvkm-y += nvkm/subdev/secboot/acr_r364.o 8nvkm-y += nvkm/subdev/secboot/acr_r364.o
9nvkm-y += nvkm/subdev/secboot/acr_r367.o 9nvkm-y += nvkm/subdev/secboot/acr_r367.o
10nvkm-y += nvkm/subdev/secboot/acr_r370.o
10nvkm-y += nvkm/subdev/secboot/acr_r375.o 11nvkm-y += nvkm/subdev/secboot/acr_r375.o
11nvkm-y += nvkm/subdev/secboot/gm200.o 12nvkm-y += nvkm/subdev/secboot/gm200.o
12nvkm-y += nvkm/subdev/secboot/gm20b.o 13nvkm-y += nvkm/subdev/secboot/gm20b.o
13nvkm-y += nvkm/subdev/secboot/gp102.o 14nvkm-y += nvkm/subdev/secboot/gp102.o
15nvkm-y += nvkm/subdev/secboot/gp108.o
14nvkm-y += nvkm/subdev/secboot/gp10b.o 16nvkm-y += nvkm/subdev/secboot/gp10b.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr.h b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr.h
index b615fc81aca4..73a2ac81ac69 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr.h
@@ -64,6 +64,7 @@ struct nvkm_acr *acr_r352_new(unsigned long);
64struct nvkm_acr *acr_r361_new(unsigned long); 64struct nvkm_acr *acr_r361_new(unsigned long);
65struct nvkm_acr *acr_r364_new(unsigned long); 65struct nvkm_acr *acr_r364_new(unsigned long);
66struct nvkm_acr *acr_r367_new(enum nvkm_secboot_falcon, unsigned long); 66struct nvkm_acr *acr_r367_new(enum nvkm_secboot_falcon, unsigned long);
67struct nvkm_acr *acr_r370_new(enum nvkm_secboot_falcon, unsigned long);
67struct nvkm_acr *acr_r375_new(enum nvkm_secboot_falcon, unsigned long); 68struct nvkm_acr *acr_r375_new(enum nvkm_secboot_falcon, unsigned long);
68 69
69#endif 70#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r370.c b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r370.c
new file mode 100644
index 000000000000..2f890dfae7fc
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r370.c
@@ -0,0 +1,144 @@
1/*
2 * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
20 * DEALINGS IN THE SOFTWARE.
21 */
22
23#include "acr_r370.h"
24#include "acr_r367.h"
25
26#include <core/msgqueue.h>
27#include <engine/falcon.h>
28#include <engine/sec2.h>
29
30static void
31acr_r370_generate_flcn_bl_desc(const struct nvkm_acr *acr,
32 const struct ls_ucode_img *img, u64 wpr_addr,
33 void *_desc)
34{
35 struct acr_r370_flcn_bl_desc *desc = _desc;
36 const struct ls_ucode_img_desc *pdesc = &img->ucode_desc;
37 u64 base, addr_code, addr_data;
38
39 base = wpr_addr + img->ucode_off + pdesc->app_start_offset;
40 addr_code = base + pdesc->app_resident_code_offset;
41 addr_data = base + pdesc->app_resident_data_offset;
42
43 desc->ctx_dma = FALCON_DMAIDX_UCODE;
44 desc->code_dma_base = u64_to_flcn64(addr_code);
45 desc->non_sec_code_off = pdesc->app_resident_code_offset;
46 desc->non_sec_code_size = pdesc->app_resident_code_size;
47 desc->code_entry_point = pdesc->app_imem_entry;
48 desc->data_dma_base = u64_to_flcn64(addr_data);
49 desc->data_size = pdesc->app_resident_data_size;
50}
51
52const struct acr_r352_ls_func
53acr_r370_ls_fecs_func = {
54 .load = acr_ls_ucode_load_fecs,
55 .generate_bl_desc = acr_r370_generate_flcn_bl_desc,
56 .bl_desc_size = sizeof(struct acr_r370_flcn_bl_desc),
57};
58
59const struct acr_r352_ls_func
60acr_r370_ls_gpccs_func = {
61 .load = acr_ls_ucode_load_gpccs,
62 .generate_bl_desc = acr_r370_generate_flcn_bl_desc,
63 .bl_desc_size = sizeof(struct acr_r370_flcn_bl_desc),
64 /* GPCCS will be loaded using PRI */
65 .lhdr_flags = LSF_FLAG_FORCE_PRIV_LOAD,
66};
67
68static void
69acr_r370_generate_sec2_bl_desc(const struct nvkm_acr *acr,
70 const struct ls_ucode_img *img, u64 wpr_addr,
71 void *_desc)
72{
73 const struct ls_ucode_img_desc *pdesc = &img->ucode_desc;
74 const struct nvkm_sec2 *sec = acr->subdev->device->sec2;
75 struct acr_r370_flcn_bl_desc *desc = _desc;
76 u64 base, addr_code, addr_data;
77 u32 addr_args;
78
79 base = wpr_addr + img->ucode_off + pdesc->app_start_offset;
80 /* For some reason we should not add app_resident_code_offset here */
81 addr_code = base;
82 addr_data = base + pdesc->app_resident_data_offset;
83 addr_args = sec->falcon->data.limit;
84 addr_args -= NVKM_MSGQUEUE_CMDLINE_SIZE;
85
86 desc->ctx_dma = FALCON_SEC2_DMAIDX_UCODE;
87 desc->code_dma_base = u64_to_flcn64(addr_code);
88 desc->non_sec_code_off = pdesc->app_resident_code_offset;
89 desc->non_sec_code_size = pdesc->app_resident_code_size;
90 desc->code_entry_point = pdesc->app_imem_entry;
91 desc->data_dma_base = u64_to_flcn64(addr_data);
92 desc->data_size = pdesc->app_resident_data_size;
93 desc->argc = 1;
94 /* args are stored at the beginning of EMEM */
95 desc->argv = 0x01000000;
96}
97
98const struct acr_r352_ls_func
99acr_r370_ls_sec2_func = {
100 .load = acr_ls_ucode_load_sec2,
101 .generate_bl_desc = acr_r370_generate_sec2_bl_desc,
102 .bl_desc_size = sizeof(struct acr_r370_flcn_bl_desc),
103 .post_run = acr_ls_sec2_post_run,
104};
105
106void
107acr_r370_generate_hs_bl_desc(const struct hsf_load_header *hdr, void *_bl_desc,
108 u64 offset)
109{
110 struct acr_r370_flcn_bl_desc *bl_desc = _bl_desc;
111
112 bl_desc->ctx_dma = FALCON_DMAIDX_VIRT;
113 bl_desc->non_sec_code_off = hdr->non_sec_code_off;
114 bl_desc->non_sec_code_size = hdr->non_sec_code_size;
115 bl_desc->sec_code_off = hsf_load_header_app_off(hdr, 0);
116 bl_desc->sec_code_size = hsf_load_header_app_size(hdr, 0);
117 bl_desc->code_entry_point = 0;
118 bl_desc->code_dma_base = u64_to_flcn64(offset);
119 bl_desc->data_dma_base = u64_to_flcn64(offset + hdr->data_dma_base);
120 bl_desc->data_size = hdr->data_size;
121}
122
123const struct acr_r352_func
124acr_r370_func = {
125 .fixup_hs_desc = acr_r367_fixup_hs_desc,
126 .generate_hs_bl_desc = acr_r370_generate_hs_bl_desc,
127 .hs_bl_desc_size = sizeof(struct acr_r370_flcn_bl_desc),
128 .shadow_blob = true,
129 .ls_ucode_img_load = acr_r367_ls_ucode_img_load,
130 .ls_fill_headers = acr_r367_ls_fill_headers,
131 .ls_write_wpr = acr_r367_ls_write_wpr,
132 .ls_func = {
133 [NVKM_SECBOOT_FALCON_SEC2] = &acr_r370_ls_sec2_func,
134 [NVKM_SECBOOT_FALCON_FECS] = &acr_r370_ls_fecs_func,
135 [NVKM_SECBOOT_FALCON_GPCCS] = &acr_r370_ls_gpccs_func,
136 },
137};
138
139struct nvkm_acr *
140acr_r370_new(enum nvkm_secboot_falcon boot_falcon,
141 unsigned long managed_falcons)
142{
143 return acr_r352_new_(&acr_r370_func, boot_falcon, managed_falcons);
144}
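
A minimal sketch, assuming the usual lo/hi split, of the flcn_u64 helper that the new acr_r370_generate_flcn_bl_desc() relies on when packing 64-bit WPR addresses (the struct itself is visible in secboot/priv.h later in this diff); this is an illustration only, not part of the patch:

struct flcn_u64 {
	u32 lo;
	u32 hi;
};

static inline struct flcn_u64
u64_to_flcn64(u64 u)
{
	struct flcn_u64 ret;

	ret.lo = (u32)u;          /* bits 31..0  */
	ret.hi = (u32)(u >> 32);  /* bits 63..32 */
	return ret;
}
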
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r370.h b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r370.h
new file mode 100644
index 000000000000..3426f86a15e4
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r370.h
@@ -0,0 +1,49 @@
1/*
2 * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
20 * DEALINGS IN THE SOFTWARE.
21 */
22
23#ifndef __NVKM_SECBOOT_ACR_R370_H__
24#define __NVKM_SECBOOT_ACR_R370_H__
25
26#include "priv.h"
27struct hsf_load_header;
28
29/* Same as acr_r361_flcn_bl_desc, plus argc/argv */
30struct acr_r370_flcn_bl_desc {
31 u32 reserved[4];
32 u32 signature[4];
33 u32 ctx_dma;
34 struct flcn_u64 code_dma_base;
35 u32 non_sec_code_off;
36 u32 non_sec_code_size;
37 u32 sec_code_off;
38 u32 sec_code_size;
39 u32 code_entry_point;
40 struct flcn_u64 data_dma_base;
41 u32 data_size;
42 u32 argc;
43 u32 argv;
44};
45
46void acr_r370_generate_hs_bl_desc(const struct hsf_load_header *, void *, u64);
47extern const struct acr_r352_ls_func acr_r370_ls_fecs_func;
48extern const struct acr_r352_ls_func acr_r370_ls_gpccs_func;
49#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r375.c b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r375.c
index ddb795bb007b..7bdef93cb7ae 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r375.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r375.c
@@ -20,90 +20,12 @@
20 * DEALINGS IN THE SOFTWARE. 20 * DEALINGS IN THE SOFTWARE.
21 */ 21 */
22 22
23#include "acr_r370.h"
23#include "acr_r367.h" 24#include "acr_r367.h"
24 25
25#include <engine/falcon.h>
26#include <core/msgqueue.h> 26#include <core/msgqueue.h>
27#include <subdev/pmu.h> 27#include <subdev/pmu.h>
28 28
29/*
30 * r375 ACR: similar to r367, but with a unified bootloader descriptor
31 * structure for GR and PMU falcons.
32 */
33
34/* Same as acr_r361_flcn_bl_desc, plus argc/argv */
35struct acr_r375_flcn_bl_desc {
36 u32 reserved[4];
37 u32 signature[4];
38 u32 ctx_dma;
39 struct flcn_u64 code_dma_base;
40 u32 non_sec_code_off;
41 u32 non_sec_code_size;
42 u32 sec_code_off;
43 u32 sec_code_size;
44 u32 code_entry_point;
45 struct flcn_u64 data_dma_base;
46 u32 data_size;
47 u32 argc;
48 u32 argv;
49};
50
51static void
52acr_r375_generate_flcn_bl_desc(const struct nvkm_acr *acr,
53 const struct ls_ucode_img *img, u64 wpr_addr,
54 void *_desc)
55{
56 struct acr_r375_flcn_bl_desc *desc = _desc;
57 const struct ls_ucode_img_desc *pdesc = &img->ucode_desc;
58 u64 base, addr_code, addr_data;
59
60 base = wpr_addr + img->ucode_off + pdesc->app_start_offset;
61 addr_code = base + pdesc->app_resident_code_offset;
62 addr_data = base + pdesc->app_resident_data_offset;
63
64 desc->ctx_dma = FALCON_DMAIDX_UCODE;
65 desc->code_dma_base = u64_to_flcn64(addr_code);
66 desc->non_sec_code_off = pdesc->app_resident_code_offset;
67 desc->non_sec_code_size = pdesc->app_resident_code_size;
68 desc->code_entry_point = pdesc->app_imem_entry;
69 desc->data_dma_base = u64_to_flcn64(addr_data);
70 desc->data_size = pdesc->app_resident_data_size;
71}
72
73static void
74acr_r375_generate_hs_bl_desc(const struct hsf_load_header *hdr, void *_bl_desc,
75 u64 offset)
76{
77 struct acr_r375_flcn_bl_desc *bl_desc = _bl_desc;
78
79 bl_desc->ctx_dma = FALCON_DMAIDX_VIRT;
80 bl_desc->non_sec_code_off = hdr->non_sec_code_off;
81 bl_desc->non_sec_code_size = hdr->non_sec_code_size;
82 bl_desc->sec_code_off = hsf_load_header_app_off(hdr, 0);
83 bl_desc->sec_code_size = hsf_load_header_app_size(hdr, 0);
84 bl_desc->code_entry_point = 0;
85 bl_desc->code_dma_base = u64_to_flcn64(offset);
86 bl_desc->data_dma_base = u64_to_flcn64(offset + hdr->data_dma_base);
87 bl_desc->data_size = hdr->data_size;
88}
89
90const struct acr_r352_ls_func
91acr_r375_ls_fecs_func = {
92 .load = acr_ls_ucode_load_fecs,
93 .generate_bl_desc = acr_r375_generate_flcn_bl_desc,
94 .bl_desc_size = sizeof(struct acr_r375_flcn_bl_desc),
95};
96
97const struct acr_r352_ls_func
98acr_r375_ls_gpccs_func = {
99 .load = acr_ls_ucode_load_gpccs,
100 .generate_bl_desc = acr_r375_generate_flcn_bl_desc,
101 .bl_desc_size = sizeof(struct acr_r375_flcn_bl_desc),
102 /* GPCCS will be loaded using PRI */
103 .lhdr_flags = LSF_FLAG_FORCE_PRIV_LOAD,
104};
105
106
107static void 29static void
108acr_r375_generate_pmu_bl_desc(const struct nvkm_acr *acr, 30acr_r375_generate_pmu_bl_desc(const struct nvkm_acr *acr,
109 const struct ls_ucode_img *img, u64 wpr_addr, 31 const struct ls_ucode_img *img, u64 wpr_addr,
@@ -111,7 +33,7 @@ acr_r375_generate_pmu_bl_desc(const struct nvkm_acr *acr,
111{ 33{
112 const struct ls_ucode_img_desc *pdesc = &img->ucode_desc; 34 const struct ls_ucode_img_desc *pdesc = &img->ucode_desc;
113 const struct nvkm_pmu *pmu = acr->subdev->device->pmu; 35 const struct nvkm_pmu *pmu = acr->subdev->device->pmu;
114 struct acr_r375_flcn_bl_desc *desc = _desc; 36 struct acr_r370_flcn_bl_desc *desc = _desc;
115 u64 base, addr_code, addr_data; 37 u64 base, addr_code, addr_data;
116 u32 addr_args; 38 u32 addr_args;
117 39
@@ -136,23 +58,22 @@ const struct acr_r352_ls_func
136acr_r375_ls_pmu_func = { 58acr_r375_ls_pmu_func = {
137 .load = acr_ls_ucode_load_pmu, 59 .load = acr_ls_ucode_load_pmu,
138 .generate_bl_desc = acr_r375_generate_pmu_bl_desc, 60 .generate_bl_desc = acr_r375_generate_pmu_bl_desc,
139 .bl_desc_size = sizeof(struct acr_r375_flcn_bl_desc), 61 .bl_desc_size = sizeof(struct acr_r370_flcn_bl_desc),
140 .post_run = acr_ls_pmu_post_run, 62 .post_run = acr_ls_pmu_post_run,
141}; 63};
142 64
143
144const struct acr_r352_func 65const struct acr_r352_func
145acr_r375_func = { 66acr_r375_func = {
146 .fixup_hs_desc = acr_r367_fixup_hs_desc, 67 .fixup_hs_desc = acr_r367_fixup_hs_desc,
147 .generate_hs_bl_desc = acr_r375_generate_hs_bl_desc, 68 .generate_hs_bl_desc = acr_r370_generate_hs_bl_desc,
148 .hs_bl_desc_size = sizeof(struct acr_r375_flcn_bl_desc), 69 .hs_bl_desc_size = sizeof(struct acr_r370_flcn_bl_desc),
149 .shadow_blob = true, 70 .shadow_blob = true,
150 .ls_ucode_img_load = acr_r367_ls_ucode_img_load, 71 .ls_ucode_img_load = acr_r367_ls_ucode_img_load,
151 .ls_fill_headers = acr_r367_ls_fill_headers, 72 .ls_fill_headers = acr_r367_ls_fill_headers,
152 .ls_write_wpr = acr_r367_ls_write_wpr, 73 .ls_write_wpr = acr_r367_ls_write_wpr,
153 .ls_func = { 74 .ls_func = {
154 [NVKM_SECBOOT_FALCON_FECS] = &acr_r375_ls_fecs_func, 75 [NVKM_SECBOOT_FALCON_FECS] = &acr_r370_ls_fecs_func,
155 [NVKM_SECBOOT_FALCON_GPCCS] = &acr_r375_ls_gpccs_func, 76 [NVKM_SECBOOT_FALCON_GPCCS] = &acr_r370_ls_gpccs_func,
156 [NVKM_SECBOOT_FALCON_PMU] = &acr_r375_ls_pmu_func, 77 [NVKM_SECBOOT_FALCON_PMU] = &acr_r375_ls_pmu_func,
157 }, 78 },
158}; 79};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gp102.c b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gp102.c
index f3b3c66349d2..1f7a3c1a7f50 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gp102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gp102.c
@@ -133,7 +133,7 @@ gp102_secboot_run_blob(struct nvkm_secboot *sb, struct nvkm_gpuobj *blob,
133 return gm200_secboot_run_blob(sb, blob, falcon); 133 return gm200_secboot_run_blob(sb, blob, falcon);
134} 134}
135 135
136static const struct nvkm_secboot_func 136const struct nvkm_secboot_func
137gp102_secboot = { 137gp102_secboot = {
138 .dtor = gm200_secboot_dtor, 138 .dtor = gm200_secboot_dtor,
139 .oneinit = gm200_secboot_oneinit, 139 .oneinit = gm200_secboot_oneinit,
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gp108.c b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gp108.c
new file mode 100644
index 000000000000..e8c27ec700de
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gp108.c
@@ -0,0 +1,67 @@
1/*
2 * Copyright 2017 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 */
22#include "gm200.h"
23#include "acr.h"
24
25int
26gp108_secboot_new(struct nvkm_device *device, int index,
27 struct nvkm_secboot **psb)
28{
29 struct gm200_secboot *gsb;
30 struct nvkm_acr *acr;
31
32 acr = acr_r370_new(NVKM_SECBOOT_FALCON_SEC2,
33 BIT(NVKM_SECBOOT_FALCON_FECS) |
34 BIT(NVKM_SECBOOT_FALCON_GPCCS) |
35 BIT(NVKM_SECBOOT_FALCON_SEC2));
36 if (IS_ERR(acr))
37 return PTR_ERR(acr);
38
39 if (!(gsb = kzalloc(sizeof(*gsb), GFP_KERNEL))) {
40 acr->func->dtor(acr);
41 return -ENOMEM;
42 }
43 *psb = &gsb->base;
44
45 return nvkm_secboot_ctor(&gp102_secboot, acr, device, index, &gsb->base);
46}
47
48MODULE_FIRMWARE("nvidia/gp108/acr/bl.bin");
49MODULE_FIRMWARE("nvidia/gp108/acr/unload_bl.bin");
50MODULE_FIRMWARE("nvidia/gp108/acr/ucode_load.bin");
51MODULE_FIRMWARE("nvidia/gp108/acr/ucode_unload.bin");
52MODULE_FIRMWARE("nvidia/gp108/gr/fecs_bl.bin");
53MODULE_FIRMWARE("nvidia/gp108/gr/fecs_inst.bin");
54MODULE_FIRMWARE("nvidia/gp108/gr/fecs_data.bin");
55MODULE_FIRMWARE("nvidia/gp108/gr/fecs_sig.bin");
56MODULE_FIRMWARE("nvidia/gp108/gr/gpccs_bl.bin");
57MODULE_FIRMWARE("nvidia/gp108/gr/gpccs_inst.bin");
58MODULE_FIRMWARE("nvidia/gp108/gr/gpccs_data.bin");
59MODULE_FIRMWARE("nvidia/gp108/gr/gpccs_sig.bin");
60MODULE_FIRMWARE("nvidia/gp108/gr/sw_ctx.bin");
61MODULE_FIRMWARE("nvidia/gp108/gr/sw_nonctx.bin");
62MODULE_FIRMWARE("nvidia/gp108/gr/sw_bundle_init.bin");
63MODULE_FIRMWARE("nvidia/gp108/gr/sw_method_init.bin");
64MODULE_FIRMWARE("nvidia/gp108/nvdec/scrubber.bin");
65MODULE_FIRMWARE("nvidia/gp108/sec2/desc.bin");
66MODULE_FIRMWARE("nvidia/gp108/sec2/image.bin");
67MODULE_FIRMWARE("nvidia/gp108/sec2/sig.bin");
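
The new gp108_secboot_new() entry point is presumably selected from the per-chipset device tables elsewhere in this series (not shown in this excerpt). A hedged sketch of that wiring, with GP108 assumed to be the nv138 chipset entry:

static const struct nvkm_device_chip
nv138_chipset = {
	.name = "GP108",
	/* ... other subdev/engine constructors elided ... */
	.secboot = gp108_secboot_new,
};
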
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/priv.h
index d9091f029506..959a7b2dbdc9 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/priv.h
@@ -40,6 +40,8 @@ int nvkm_secboot_ctor(const struct nvkm_secboot_func *, struct nvkm_acr *,
40int nvkm_secboot_falcon_reset(struct nvkm_secboot *); 40int nvkm_secboot_falcon_reset(struct nvkm_secboot *);
41int nvkm_secboot_falcon_run(struct nvkm_secboot *); 41int nvkm_secboot_falcon_run(struct nvkm_secboot *);
42 42
43extern const struct nvkm_secboot_func gp102_secboot;
44
43struct flcn_u64 { 45struct flcn_u64 {
44 u32 lo; 46 u32 lo;
45 u32 hi; 47 u32 hi;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/Kbuild
index 7ba56b12badd..550702eab0b1 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/Kbuild
@@ -9,7 +9,9 @@ nvkm-y += nvkm/subdev/therm/nv40.o
9nvkm-y += nvkm/subdev/therm/nv50.o 9nvkm-y += nvkm/subdev/therm/nv50.o
10nvkm-y += nvkm/subdev/therm/g84.o 10nvkm-y += nvkm/subdev/therm/g84.o
11nvkm-y += nvkm/subdev/therm/gt215.o 11nvkm-y += nvkm/subdev/therm/gt215.o
12nvkm-y += nvkm/subdev/therm/gf100.o
12nvkm-y += nvkm/subdev/therm/gf119.o 13nvkm-y += nvkm/subdev/therm/gf119.o
14nvkm-y += nvkm/subdev/therm/gk104.o
13nvkm-y += nvkm/subdev/therm/gm107.o 15nvkm-y += nvkm/subdev/therm/gm107.o
14nvkm-y += nvkm/subdev/therm/gm200.o 16nvkm-y += nvkm/subdev/therm/gm200.o
15nvkm-y += nvkm/subdev/therm/gp100.o 17nvkm-y += nvkm/subdev/therm/gp100.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/base.c
index f27fc6d0d4c6..bf62303571b3 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/base.c
@@ -21,6 +21,7 @@
21 * 21 *
22 * Authors: Martin Peres 22 * Authors: Martin Peres
23 */ 23 */
24#include <nvkm/core/option.h>
24#include "priv.h" 25#include "priv.h"
25 26
26int 27int
@@ -297,6 +298,38 @@ nvkm_therm_attr_set(struct nvkm_therm *therm,
297 return -EINVAL; 298 return -EINVAL;
298} 299}
299 300
301void
302nvkm_therm_clkgate_enable(struct nvkm_therm *therm)
303{
304 if (!therm->func->clkgate_enable || !therm->clkgating_enabled)
305 return;
306
307 nvkm_debug(&therm->subdev,
308 "Enabling clockgating\n");
309 therm->func->clkgate_enable(therm);
310}
311
312void
313nvkm_therm_clkgate_fini(struct nvkm_therm *therm, bool suspend)
314{
315 if (!therm->func->clkgate_fini || !therm->clkgating_enabled)
316 return;
317
318 nvkm_debug(&therm->subdev,
319 "Preparing clockgating for %s\n",
320 suspend ? "suspend" : "fini");
321 therm->func->clkgate_fini(therm, suspend);
322}
323
324static void
325nvkm_therm_clkgate_oneinit(struct nvkm_therm *therm)
326{
327 if (!therm->func->clkgate_enable || !therm->clkgating_enabled)
328 return;
329
330 nvkm_info(&therm->subdev, "Clockgating enabled\n");
331}
332
300static void 333static void
301nvkm_therm_intr(struct nvkm_subdev *subdev) 334nvkm_therm_intr(struct nvkm_subdev *subdev)
302{ 335{
@@ -333,6 +366,7 @@ nvkm_therm_oneinit(struct nvkm_subdev *subdev)
333 nvkm_therm_fan_ctor(therm); 366 nvkm_therm_fan_ctor(therm);
334 nvkm_therm_fan_mode(therm, NVKM_THERM_CTRL_AUTO); 367 nvkm_therm_fan_mode(therm, NVKM_THERM_CTRL_AUTO);
335 nvkm_therm_sensor_preinit(therm); 368 nvkm_therm_sensor_preinit(therm);
369 nvkm_therm_clkgate_oneinit(therm);
336 return 0; 370 return 0;
337} 371}
338 372
@@ -357,6 +391,16 @@ nvkm_therm_init(struct nvkm_subdev *subdev)
357 return 0; 391 return 0;
358} 392}
359 393
394void
395nvkm_therm_clkgate_init(struct nvkm_therm *therm,
396 const struct nvkm_therm_clkgate_pack *p)
397{
398 if (!therm->func->clkgate_init || !therm->clkgating_enabled)
399 return;
400
401 therm->func->clkgate_init(therm, p);
402}
403
360static void * 404static void *
361nvkm_therm_dtor(struct nvkm_subdev *subdev) 405nvkm_therm_dtor(struct nvkm_subdev *subdev)
362{ 406{
@@ -374,15 +418,10 @@ nvkm_therm = {
374 .intr = nvkm_therm_intr, 418 .intr = nvkm_therm_intr,
375}; 419};
376 420
377int 421void
378nvkm_therm_new_(const struct nvkm_therm_func *func, struct nvkm_device *device, 422nvkm_therm_ctor(struct nvkm_therm *therm, struct nvkm_device *device,
379 int index, struct nvkm_therm **ptherm) 423 int index, const struct nvkm_therm_func *func)
380{ 424{
381 struct nvkm_therm *therm;
382
383 if (!(therm = *ptherm = kzalloc(sizeof(*therm), GFP_KERNEL)))
384 return -ENOMEM;
385
386 nvkm_subdev_ctor(&nvkm_therm, device, index, &therm->subdev); 425 nvkm_subdev_ctor(&nvkm_therm, device, index, &therm->subdev);
387 therm->func = func; 426 therm->func = func;
388 427
@@ -395,5 +434,20 @@ nvkm_therm_new_(const struct nvkm_therm_func *func, struct nvkm_device *device,
395 therm->attr_get = nvkm_therm_attr_get; 434 therm->attr_get = nvkm_therm_attr_get;
396 therm->attr_set = nvkm_therm_attr_set; 435 therm->attr_set = nvkm_therm_attr_set;
397 therm->mode = therm->suspend = -1; /* undefined */ 436 therm->mode = therm->suspend = -1; /* undefined */
437
438 therm->clkgating_enabled = nvkm_boolopt(device->cfgopt,
439 "NvPmEnableGating", false);
440}
441
442int
443nvkm_therm_new_(const struct nvkm_therm_func *func, struct nvkm_device *device,
444 int index, struct nvkm_therm **ptherm)
445{
446 struct nvkm_therm *therm;
447
448 if (!(therm = *ptherm = kzalloc(sizeof(*therm), GFP_KERNEL)))
449 return -ENOMEM;
450
451 nvkm_therm_ctor(therm, device, index, func);
398 return 0; 452 return 0;
399} 453}
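
nvkm_therm_ctor() now reads the clockgating switch from the device's config-option string via nvkm_boolopt(), so the feature stays off unless explicitly requested. Assuming nouveau's usual config-option mechanism, enabling it would look roughly like this (illustrative, not taken from the patch):

	# e.g. on the kernel command line, or as the equivalent module option
	nouveau.config=NvPmEnableGating=1
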
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/gf100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/gf100.c
new file mode 100644
index 000000000000..5ae6913320e8
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/gf100.c
@@ -0,0 +1,58 @@
1/*
2 * Copyright 2018 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Lyude Paul
23 */
24#include <core/device.h>
25
26#include "priv.h"
27
28#define pack_for_each_init(init, pack, head) \
29 for (pack = head; pack && pack->init; pack++) \
30 for (init = pack->init; init && init->count; init++)
31void
32gf100_clkgate_init(struct nvkm_therm *therm,
33 const struct nvkm_therm_clkgate_pack *p)
34{
35 struct nvkm_device *device = therm->subdev.device;
36 const struct nvkm_therm_clkgate_pack *pack;
37 const struct nvkm_therm_clkgate_init *init;
38 u32 next, addr;
39
40 pack_for_each_init(init, pack, p) {
41 next = init->addr + init->count * 8;
42 addr = init->addr;
43
44 nvkm_trace(&therm->subdev, "{ 0x%06x, %d, 0x%08x }\n",
45 init->addr, init->count, init->data);
46 while (addr < next) {
47 nvkm_trace(&therm->subdev, "\t0x%06x = 0x%08x\n",
48 addr, init->data);
49 nvkm_wr32(device, addr, init->data);
50 addr += 8;
51 }
52 }
53}
54
55/*
56 * TODO: Fermi clockgating isn't understood fully yet, so we don't specify any
57 * clockgate functions to use
58 */
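
The pack_for_each_init() iterator above walks a null-terminated array of packs, each pointing at a null-terminated array of { addr, count, data } triplets; every triplet programs `count` registers spaced 8 bytes apart with the same data word. A hypothetical table (made-up addresses and values, purely to show the shape of input gf100_clkgate_init() expects):

static const struct nvkm_therm_clkgate_init
example_clkgate_init[] = {
	/* hypothetical: writes 0x00004544 to 0x020200, 0x020208, ..., 0x020238 */
	{ .addr = 0x020200, .count = 8, .data = 0x00004544 },
	{}
};

static const struct nvkm_therm_clkgate_pack
example_clkgate_pack[] = {
	{ .init = example_clkgate_init },
	{}
};

/* nvkm_therm_clkgate_init(therm, example_clkgate_pack) would then emit
 * eight identical writes, 8 bytes apart, starting at 0x020200. */
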
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/gf100.h b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/gf100.h
new file mode 100644
index 000000000000..cfb25af77c60
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/gf100.h
@@ -0,0 +1,35 @@
1/*
2 * Copyright 2018 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Lyude Paul
23 */
24
25#ifndef __GF100_THERM_H__
26#define __GF100_THERM_H__
27
28#include <core/device.h>
29
30struct gf100_idle_filter {
31 u32 fecs;
32 u32 hubmmu;
33};
34
35#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/gf119.c b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/gf119.c
index 06dcfd6ee966..0981b02790e2 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/gf119.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/gf119.c
@@ -49,7 +49,7 @@ pwm_info(struct nvkm_therm *therm, int line)
49 return -ENODEV; 49 return -ENODEV;
50} 50}
51 51
52static int 52int
53gf119_fan_pwm_ctrl(struct nvkm_therm *therm, int line, bool enable) 53gf119_fan_pwm_ctrl(struct nvkm_therm *therm, int line, bool enable)
54{ 54{
55 struct nvkm_device *device = therm->subdev.device; 55 struct nvkm_device *device = therm->subdev.device;
@@ -63,7 +63,7 @@ gf119_fan_pwm_ctrl(struct nvkm_therm *therm, int line, bool enable)
63 return 0; 63 return 0;
64} 64}
65 65
66static int 66int
67gf119_fan_pwm_get(struct nvkm_therm *therm, int line, u32 *divs, u32 *duty) 67gf119_fan_pwm_get(struct nvkm_therm *therm, int line, u32 *divs, u32 *duty)
68{ 68{
69 struct nvkm_device *device = therm->subdev.device; 69 struct nvkm_device *device = therm->subdev.device;
@@ -85,7 +85,7 @@ gf119_fan_pwm_get(struct nvkm_therm *therm, int line, u32 *divs, u32 *duty)
85 return -EINVAL; 85 return -EINVAL;
86} 86}
87 87
88static int 88int
89gf119_fan_pwm_set(struct nvkm_therm *therm, int line, u32 divs, u32 duty) 89gf119_fan_pwm_set(struct nvkm_therm *therm, int line, u32 divs, u32 duty)
90{ 90{
91 struct nvkm_device *device = therm->subdev.device; 91 struct nvkm_device *device = therm->subdev.device;
@@ -102,7 +102,7 @@ gf119_fan_pwm_set(struct nvkm_therm *therm, int line, u32 divs, u32 duty)
102 return 0; 102 return 0;
103} 103}
104 104
105static int 105int
106gf119_fan_pwm_clock(struct nvkm_therm *therm, int line) 106gf119_fan_pwm_clock(struct nvkm_therm *therm, int line)
107{ 107{
108 struct nvkm_device *device = therm->subdev.device; 108 struct nvkm_device *device = therm->subdev.device;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/gk104.c b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/gk104.c
new file mode 100644
index 000000000000..4e03971d2e3d
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/gk104.c
@@ -0,0 +1,136 @@
1/*
2 * Copyright 2018 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Lyude Paul
23 */
24#include <core/device.h>
25
26#include "priv.h"
27#include "gk104.h"
28
29void
30gk104_clkgate_enable(struct nvkm_therm *base)
31{
32 struct gk104_therm *therm = gk104_therm(base);
33 struct nvkm_device *dev = therm->base.subdev.device;
34 const struct gk104_clkgate_engine_info *order = therm->clkgate_order;
35 int i;
36
37 /* Program ENG_MANT, ENG_FILTER */
38 for (i = 0; order[i].engine != NVKM_SUBDEV_NR; i++) {
39 if (!nvkm_device_subdev(dev, order[i].engine))
40 continue;
41
42 nvkm_mask(dev, 0x20200 + order[i].offset, 0xff00, 0x4500);
43 }
44
45 /* magic */
46 nvkm_wr32(dev, 0x020288, therm->idle_filter->fecs);
47 nvkm_wr32(dev, 0x02028c, therm->idle_filter->hubmmu);
48
49 /* Enable clockgating (ENG_CLK = RUN->AUTO) */
50 for (i = 0; order[i].engine != NVKM_SUBDEV_NR; i++) {
51 if (!nvkm_device_subdev(dev, order[i].engine))
52 continue;
53
54 nvkm_mask(dev, 0x20200 + order[i].offset, 0x00ff, 0x0045);
55 }
56}
57
58void
59gk104_clkgate_fini(struct nvkm_therm *base, bool suspend)
60{
61 struct gk104_therm *therm = gk104_therm(base);
62 struct nvkm_device *dev = therm->base.subdev.device;
63 const struct gk104_clkgate_engine_info *order = therm->clkgate_order;
64 int i;
65
66 /* ENG_CLK = AUTO->RUN, ENG_PWR = RUN->AUTO */
67 for (i = 0; order[i].engine != NVKM_SUBDEV_NR; i++) {
68 if (!nvkm_device_subdev(dev, order[i].engine))
69 continue;
70
71 nvkm_mask(dev, 0x20200 + order[i].offset, 0xff, 0x54);
72 }
73}
74
75const struct gk104_clkgate_engine_info gk104_clkgate_engine_info[] = {
76 { NVKM_ENGINE_GR, 0x00 },
77 { NVKM_ENGINE_MSPDEC, 0x04 },
78 { NVKM_ENGINE_MSPPP, 0x08 },
79 { NVKM_ENGINE_MSVLD, 0x0c },
80 { NVKM_ENGINE_CE0, 0x10 },
81 { NVKM_ENGINE_CE1, 0x14 },
82 { NVKM_ENGINE_MSENC, 0x18 },
83 { NVKM_ENGINE_CE2, 0x1c },
84 { NVKM_SUBDEV_NR, 0 },
85};
86
87const struct gf100_idle_filter gk104_idle_filter = {
88 .fecs = 0x00001000,
89 .hubmmu = 0x00001000,
90};
91
92static const struct nvkm_therm_func
93gk104_therm_func = {
94 .init = gf119_therm_init,
95 .fini = g84_therm_fini,
96 .pwm_ctrl = gf119_fan_pwm_ctrl,
97 .pwm_get = gf119_fan_pwm_get,
98 .pwm_set = gf119_fan_pwm_set,
99 .pwm_clock = gf119_fan_pwm_clock,
100 .temp_get = g84_temp_get,
101 .fan_sense = gt215_therm_fan_sense,
102 .program_alarms = nvkm_therm_program_alarms_polling,
103 .clkgate_init = gf100_clkgate_init,
104 .clkgate_enable = gk104_clkgate_enable,
105 .clkgate_fini = gk104_clkgate_fini,
106};
107
108static int
109gk104_therm_new_(const struct nvkm_therm_func *func,
110 struct nvkm_device *device,
111 int index,
112 const struct gk104_clkgate_engine_info *clkgate_order,
113 const struct gf100_idle_filter *idle_filter,
114 struct nvkm_therm **ptherm)
115{
116 struct gk104_therm *therm = kzalloc(sizeof(*therm), GFP_KERNEL);
117
118 if (!therm)
119 return -ENOMEM;
120
121 nvkm_therm_ctor(&therm->base, device, index, func);
122 *ptherm = &therm->base;
123 therm->clkgate_order = clkgate_order;
124 therm->idle_filter = idle_filter;
125
126 return 0;
127}
128
129int
130gk104_therm_new(struct nvkm_device *device,
131 int index, struct nvkm_therm **ptherm)
132{
133 return gk104_therm_new_(&gk104_therm_func, device, index,
134 gk104_clkgate_engine_info, &gk104_idle_filter,
135 ptherm);
136}
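
For clarity, a straight-line expansion of what gk104_clkgate_enable() does for a single engine (GR, table offset 0x00), using the gk104_idle_filter values above; bit-field meanings follow the comments in the patch, anything beyond that is an assumption:

static void
example_clkgate_enable_gr(struct nvkm_device *dev)
{
	/* program ENG_MANT / ENG_FILTER in the high byte of the engine's
	 * clockgating register */
	nvkm_mask(dev, 0x20200 + 0x00, 0xff00, 0x4500);

	/* idle filters shared by all engines ("magic" in the patch) */
	nvkm_wr32(dev, 0x020288, 0x00001000);	/* fecs   */
	nvkm_wr32(dev, 0x02028c, 0x00001000);	/* hubmmu */

	/* finally flip ENG_CLK from RUN to AUTO in the low byte */
	nvkm_mask(dev, 0x20200 + 0x00, 0x00ff, 0x0045);
}
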
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/gk104.h b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/gk104.h
new file mode 100644
index 000000000000..293e7743b19b
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/gk104.h
@@ -0,0 +1,48 @@
1/*
2 * Copyright 2018 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Lyude Paul
23 */
24
25#ifndef __GK104_THERM_H__
26#define __GK104_THERM_H__
27#define gk104_therm(p) (container_of((p), struct gk104_therm, base))
28
29#include <subdev/therm.h>
30#include "priv.h"
31#include "gf100.h"
32
33struct gk104_clkgate_engine_info {
34 enum nvkm_devidx engine;
35 u8 offset;
36};
37
38struct gk104_therm {
39 struct nvkm_therm base;
40
41 const struct gk104_clkgate_engine_info *clkgate_order;
42 const struct gf100_idle_filter *idle_filter;
43};
44
45extern const struct gk104_clkgate_engine_info gk104_clkgate_engine_info[];
46extern const struct gf100_idle_filter gk104_idle_filter;
47
48#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/gt215.c b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/gt215.c
index c08097f2aff5..4caf401d001a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/gt215.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/gt215.c
@@ -36,7 +36,7 @@ gt215_therm_fan_sense(struct nvkm_therm *therm)
36 return -ENODEV; 36 return -ENODEV;
37} 37}
38 38
39static void 39void
40gt215_therm_init(struct nvkm_therm *therm) 40gt215_therm_init(struct nvkm_therm *therm)
41{ 41{
42 struct nvkm_device *device = therm->subdev.device; 42 struct nvkm_device *device = therm->subdev.device;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/priv.h
index 1f46e371d7c4..21659daf1864 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/priv.h
@@ -32,6 +32,8 @@
32 32
33int nvkm_therm_new_(const struct nvkm_therm_func *, struct nvkm_device *, 33int nvkm_therm_new_(const struct nvkm_therm_func *, struct nvkm_device *,
34 int index, struct nvkm_therm **); 34 int index, struct nvkm_therm **);
35void nvkm_therm_ctor(struct nvkm_therm *therm, struct nvkm_device *device,
36 int index, const struct nvkm_therm_func *func);
35 37
36struct nvkm_fan { 38struct nvkm_fan {
37 struct nvkm_therm *parent; 39 struct nvkm_therm *parent;
@@ -66,8 +68,6 @@ int nvkm_therm_fan_set(struct nvkm_therm *, bool now, int percent);
66int nvkm_therm_fan_user_get(struct nvkm_therm *); 68int nvkm_therm_fan_user_get(struct nvkm_therm *);
67int nvkm_therm_fan_user_set(struct nvkm_therm *, int percent); 69int nvkm_therm_fan_user_set(struct nvkm_therm *, int percent);
68 70
69int nvkm_therm_preinit(struct nvkm_therm *);
70
71int nvkm_therm_sensor_init(struct nvkm_therm *); 71int nvkm_therm_sensor_init(struct nvkm_therm *);
72int nvkm_therm_sensor_fini(struct nvkm_therm *, bool suspend); 72int nvkm_therm_sensor_fini(struct nvkm_therm *, bool suspend);
73void nvkm_therm_sensor_preinit(struct nvkm_therm *); 73void nvkm_therm_sensor_preinit(struct nvkm_therm *);
@@ -96,6 +96,11 @@ struct nvkm_therm_func {
96 int (*fan_sense)(struct nvkm_therm *); 96 int (*fan_sense)(struct nvkm_therm *);
97 97
98 void (*program_alarms)(struct nvkm_therm *); 98 void (*program_alarms)(struct nvkm_therm *);
99
100 void (*clkgate_init)(struct nvkm_therm *,
101 const struct nvkm_therm_clkgate_pack *);
102 void (*clkgate_enable)(struct nvkm_therm *);
103 void (*clkgate_fini)(struct nvkm_therm *, bool);
99}; 104};
100 105
101void nv40_therm_intr(struct nvkm_therm *); 106void nv40_therm_intr(struct nvkm_therm *);
@@ -111,9 +116,21 @@ void g84_therm_fini(struct nvkm_therm *);
111 116
112int gt215_therm_fan_sense(struct nvkm_therm *); 117int gt215_therm_fan_sense(struct nvkm_therm *);
113 118
119void gf100_clkgate_init(struct nvkm_therm *,
120 const struct nvkm_therm_clkgate_pack *);
121
114void g84_therm_init(struct nvkm_therm *); 122void g84_therm_init(struct nvkm_therm *);
123
124int gf119_fan_pwm_ctrl(struct nvkm_therm *, int, bool);
125int gf119_fan_pwm_get(struct nvkm_therm *, int, u32 *, u32 *);
126int gf119_fan_pwm_set(struct nvkm_therm *, int, u32, u32);
127int gf119_fan_pwm_clock(struct nvkm_therm *, int);
115void gf119_therm_init(struct nvkm_therm *); 128void gf119_therm_init(struct nvkm_therm *);
116 129
130void gk104_therm_init(struct nvkm_therm *);
131void gk104_clkgate_enable(struct nvkm_therm *);
132void gk104_clkgate_fini(struct nvkm_therm *, bool);
133
117int nvkm_fanpwm_create(struct nvkm_therm *, struct dcb_gpio_func *); 134int nvkm_fanpwm_create(struct nvkm_therm *, struct dcb_gpio_func *);
118int nvkm_fantog_create(struct nvkm_therm *, struct dcb_gpio_func *); 135int nvkm_fantog_create(struct nvkm_therm *, struct dcb_gpio_func *);
119int nvkm_fannil_create(struct nvkm_therm *); 136int nvkm_fannil_create(struct nvkm_therm *);
diff --git a/drivers/gpu/drm/radeon/radeon_uvd.c b/drivers/gpu/drm/radeon/radeon_uvd.c
index d34d1cf33895..95f4db70dd22 100644
--- a/drivers/gpu/drm/radeon/radeon_uvd.c
+++ b/drivers/gpu/drm/radeon/radeon_uvd.c
@@ -995,7 +995,7 @@ int radeon_uvd_calc_upll_dividers(struct radeon_device *rdev,
995 /* calc dclk divider with current vco freq */ 995 /* calc dclk divider with current vco freq */
996 dclk_div = radeon_uvd_calc_upll_post_div(vco_freq, dclk, 996 dclk_div = radeon_uvd_calc_upll_post_div(vco_freq, dclk,
997 pd_min, pd_even); 997 pd_min, pd_even);
998 if (vclk_div > pd_max) 998 if (dclk_div > pd_max)
999 break; /* vco is too big, it has to stop */ 999 break; /* vco is too big, it has to stop */
1000 1000
1001 /* calc score with current vco freq */ 1001 /* calc score with current vco freq */
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 893003fc76a1..2fef09a56d16 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -1727,7 +1727,7 @@ int ttm_bo_swapout(struct ttm_bo_global *glob, struct ttm_operation_ctx *ctx)
1727 kref_get(&bo->list_kref); 1727 kref_get(&bo->list_kref);
1728 1728
1729 if (!list_empty(&bo->ddestroy)) { 1729 if (!list_empty(&bo->ddestroy)) {
1730 ret = ttm_bo_cleanup_refs(bo, false, false, true); 1730 ret = ttm_bo_cleanup_refs(bo, false, false, locked);
1731 kref_put(&bo->list_kref, ttm_bo_release_list); 1731 kref_put(&bo->list_kref, ttm_bo_release_list);
1732 return ret; 1732 return ret;
1733 } 1733 }
diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c
index 08a3c324242e..60fcef1593dd 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_vm.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c
@@ -316,7 +316,7 @@ static void ttm_bo_vm_close(struct vm_area_struct *vma)
316 316
317static int ttm_bo_vm_access_kmap(struct ttm_buffer_object *bo, 317static int ttm_bo_vm_access_kmap(struct ttm_buffer_object *bo,
318 unsigned long offset, 318 unsigned long offset,
319 void *buf, int len, int write) 319 uint8_t *buf, int len, int write)
320{ 320{
321 unsigned long page = offset >> PAGE_SHIFT; 321 unsigned long page = offset >> PAGE_SHIFT;
322 unsigned long bytes_left = len; 322 unsigned long bytes_left = len;
@@ -345,6 +345,7 @@ static int ttm_bo_vm_access_kmap(struct ttm_buffer_object *bo,
345 ttm_bo_kunmap(&map); 345 ttm_bo_kunmap(&map);
346 346
347 page++; 347 page++;
348 buf += bytes;
348 bytes_left -= bytes; 349 bytes_left -= bytes;
349 offset = 0; 350 offset = 0;
350 } while (bytes_left); 351 } while (bytes_left);